Diffstat (limited to 'roles')
-rw-r--r--  roles/calico/defaults/main.yaml | 8
-rw-r--r--  roles/calico_master/defaults/main.yaml | 1
-rw-r--r--  roles/calico_master/templates/calico-policy-controller.yml.j2 | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 3
-rw-r--r--  roles/etcd_common/defaults/main.yml | 10
-rw-r--r--  roles/etcd_common/tasks/backup.yml (renamed from roles/etcd_upgrade/tasks/backup.yml) | 33
-rw-r--r--  roles/etcd_common/tasks/drop_etcdctl.yml (renamed from roles/etcd_common/tasks/etcdctl.yml) | 0
-rw-r--r--  roles/etcd_common/tasks/main.yml | 9
-rw-r--r--  roles/etcd_upgrade/defaults/main.yml | 6
-rw-r--r--  roles/etcd_upgrade/tasks/main.yml | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 54
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 44
-rw-r--r--  roles/lib_openshift/src/class/oc_obj.py | 10
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 44
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 36
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 23
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 4
-rw-r--r--  roles/openshift_metrics/README.md | 3
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 9
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/install_hosa.yaml | 44
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 37
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 3
-rw-r--r--  roles/openshift_metrics/tasks/oc_apply.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_hosa.yaml | 15
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 | 54
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 | 91
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 | 25
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 | 7
-rw-r--r--  roles/openshift_metrics/templates/pvc.j2 | 7
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 2
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 6
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 52
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 83
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 16
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml | 50
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml | 31
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml | 41
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 108
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 36
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 29
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 21
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 68
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 5
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 3
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2 (renamed from roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml) | 2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 | 10
76 files changed, 1243 insertions(+), 1042 deletions(-)
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index c7eea46f2..207dee068 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -3,13 +3,13 @@ kubeconfig: "{{openshift.common.config_base}}/node/{{ 'system:node:' + openshif
cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
-cni_url: "https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz"
+cni_url: "https://github.com/containernetworking/cni/releases/download/v0.5.2/cni-amd64-v0.5.2.tgz"
-calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico"
-calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam"
+calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico"
+calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico-ipam"
calico_ipv4pool_ipip: "always"
calico_ipv4pool_cidr: "192.168.0.0/16"
calico_log_dir: "/var/log/calico"
-calico_node_image: "calico/node:v1.1.0"
+calico_node_image: "calico/node:v1.2.1"
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index 5b324bce5..b2df0105f 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -4,3 +4,4 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf
calicoctl_bin_dir: "/usr/local/bin/"
calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl"
+calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.5.4"
diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2
index 1b87758ce..811884473 100644
--- a/roles/calico_master/templates/calico-policy-controller.yml.j2
+++ b/roles/calico_master/templates/calico-policy-controller.yml.j2
@@ -74,7 +74,7 @@ spec:
serviceAccountName: calico
containers:
- name: calico-policy-controller
- image: quay.io/calico/kube-policy-controller:v0.5.4
+ image: {{ calico_url_policy_controller }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index fa2f44609..586aebb11 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -122,7 +122,8 @@
- include_role:
name: etcd_common
- tasks_from: etcdctl.yml
+ vars:
+ r_etcd_common_action: drop_etcdctl
when: openshift_etcd_etcdctl_profile | default(true) | bool
- name: Set fact etcd_service_status_changed
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 14e712fcf..8cc7a9c20 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,8 +1,18 @@
---
+# Default action when calling this role
+r_etcd_common_action: noop
+r_etcd_common_backup_tag: ''
+r_etcd_common_backup_sufix_name: ''
+
# runc, docker, host
r_etcd_common_etcd_runtime: "docker"
r_etcd_common_embedded_etcd: false
+# etcd run on a host => use etcdctl command directly
+# etcd run as a docker container => use docker exec
+# etcd run as a runc container => use runc exec
+r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
+
# etcd server vars
etcd_conf_dir: '/etc/etcd'
r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
diff --git a/roles/etcd_upgrade/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 1ea6fc59f..4a4832275 100644
--- a/roles/etcd_upgrade/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -1,15 +1,11 @@
---
-# INPUT r_etcd_backup_sufix_name
-# INPUT r_etcd_backup_tag
-# OUTPUT r_etcd_upgrade_backup_complete
- set_fact:
- # ORIGIN etcd_data_dir etcd_common.defaults
- l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_backup_tag | default('') }}{{ r_etcd_backup_sufix_name }}"
+ l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
- register: avail_disk
+ register: l_avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -17,8 +13,8 @@
# TODO: replace shell module with command and update later checks
- name: Check current etcd disk usage
shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: r_etcd_upgrade_embedded_etcd | bool
+ register: l_etcd_disk_usage
+ when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -26,9 +22,9 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (r_etcd_upgrade_embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+ {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_avail_disk.stdout }} Kb available.
+ when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
@@ -37,17 +33,22 @@
#
# For embedded non containerized we need to ensure we have the latest version
# etcd on the host.
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
- name: Install latest etcd for embedded
package:
name: etcd
state: latest
when:
- - r_etcd_upgrade_embedded_etcd | bool
+ - r_etcd_common_embedded_etcd | bool
- not l_ostree_booted.stat.exists | bool
- name: Generate etcd backup
command: >
- {{ etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
+ {{ r_etcd_common_etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
--backup-dir={{ l_etcd_backup_dir }}
# According to the docs change you can simply copy snap/db
@@ -55,16 +56,16 @@
- name: Check for v3 data store
stat:
path: "{{ etcd_data_dir }}/member/snap/db"
- register: v3_db
+ register: l_v3_db
- name: Copy etcd v3 data store
command: >
cp -a {{ etcd_data_dir }}/member/snap/db
{{ l_etcd_backup_dir }}/member/snap/
- when: v3_db.stat.exists
+ when: l_v3_db.stat.exists
- set_fact:
- r_etcd_upgrade_backup_complete: True
+ r_etcd_common_backup_complete: True
- name: Display location of etcd backup
debug:
diff --git a/roles/etcd_common/tasks/etcdctl.yml b/roles/etcd_common/tasks/drop_etcdctl.yml
index 6cb456677..6cb456677 100644
--- a/roles/etcd_common/tasks/etcdctl.yml
+++ b/roles/etcd_common/tasks/drop_etcdctl.yml
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
new file mode 100644
index 000000000..6ed87e6c7
--- /dev/null
+++ b/roles/etcd_common/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: Fail if invalid r_etcd_common_action provided
+ fail:
+ msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'"
+ when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl']
+
+- name: Include main action task file
+ include: "{{ r_etcd_common_action }}.yml"
+ when: r_etcd_common_action != "noop"
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
index b61bf526c..61bbba225 100644
--- a/roles/etcd_upgrade/defaults/main.yml
+++ b/roles/etcd_upgrade/defaults/main.yml
@@ -1,9 +1,3 @@
---
r_etcd_upgrade_action: upgrade
r_etcd_upgrade_mechanism: rpm
-r_etcd_upgrade_embedded_etcd: false
-r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}"
-# etcd run on a host => use etcdctl command directly
-# etcd run as a docker container => use docker exec
-# etcd run as a runc container => use runc exec
-etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_upgrade_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
index 5178c14e3..129c69d6b 100644
--- a/roles/etcd_upgrade/tasks/main.yml
+++ b/roles/etcd_upgrade/tasks/main.yml
@@ -2,9 +2,9 @@
# INPUT r_etcd_upgrade_action
- name: Fail if invalid etcd_upgrade_action provided
fail:
- msg: "etcd_upgrade role can only be called with 'upgrade' or 'backup'"
+ msg: "etcd_upgrade role can only be called with 'upgrade'"
when:
- - r_etcd_upgrade_action not in ['upgrade', 'backup']
+ - r_etcd_upgrade_action not in ['upgrade']
- name: Detecting Atomic Host Operating System
stat:
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 3974cc4dd..1b73bfd0e 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1097,10 +1097,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1110,34 +1106,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 320eac17e..b09321a5b 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1083,10 +1083,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1096,34 +1092,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index f9658d6e1..221ef5094 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1069,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1082,34 +1078,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 0bdfd0bad..071562875 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1069,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1082,34 +1078,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index df0e40d20..bf2650460 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1187,10 +1187,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1200,34 +1196,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 8af8cb196..a2b7d12c0 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1212,10 +1212,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1225,34 +1221,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 3ed0d65dc..289f08b83 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1061,10 +1061,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1074,34 +1070,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 5c8ed48d2..7cd29215f 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1067,10 +1067,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1080,34 +1076,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index f3b6d552d..5b11f45ba 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1111,10 +1111,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1124,34 +1120,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index c6421128a..d3834ce0c 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1078,10 +1078,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1091,34 +1087,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index a791c29af..0d751fe28 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1051,10 +1051,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1064,34 +1060,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index bbc123ce0..3a6ba3e56 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1070,10 +1070,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1083,34 +1079,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index cd1afd0d2..5db036b23 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1087,10 +1087,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1100,34 +1096,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 215723cc8..56af303cc 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1090,10 +1090,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1103,34 +1099,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
@@ -1473,7 +1461,12 @@ class OCObject(OpenShiftCLI):
def delete(self):
'''delete the object'''
- return self._delete(self.kind, name=self.name, selector=self.selector)
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -1557,7 +1550,8 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 358ef5130..130521761 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1022,10 +1022,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1035,34 +1031,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 025b846c6..c6568d520 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1079,10 +1079,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1092,34 +1088,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 05dfddab8..a78bc06d2 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1076,10 +1076,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1089,34 +1085,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index d7de4964c..a88639bfc 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1071,10 +1071,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1084,34 +1080,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 3090b4cad..0c0bc9386 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1121,10 +1121,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1134,34 +1130,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 6a505fb6b..f112b6dd0 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1065,10 +1065,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1078,34 +1074,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index db6e682d0..d762e0c38 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1117,10 +1117,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1130,34 +1126,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 308f45488..769b75e15 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1124,10 +1124,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1137,34 +1133,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 68c1fc51c..446987eff 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1063,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1076,34 +1072,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index ebc5bf8a2..c7eb1986a 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1063,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1076,34 +1072,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index d1a20fddc..3a98693b7 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1123,10 +1123,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1136,34 +1132,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 548c9d8e0..939261526 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1035,10 +1035,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1048,34 +1044,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 3826cd8e5..41e7d0ab8 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1112,10 +1112,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1125,34 +1121,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 6f0da3d5c..5e423bea9 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -33,7 +33,12 @@ class OCObject(OpenShiftCLI):
def delete(self):
'''delete the object'''
- return self._delete(self.kind, name=self.name, selector=self.selector)
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -117,7 +122,8 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
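The change to OCObject.delete() above makes `state: absent` idempotent: an `oc delete` that fails only because the object is already gone is rewritten as a success, and the absent-state check now tolerates query results that carry no 'items' key. A minimal sketch of the delete normalization, assuming a results dict shaped like the OpenShiftCLI return value (the helper name is illustrative, not part of the module):

# Sketch only: mirrors the "not found" check added to OCObject.delete().
def normalize_delete_result(results, name):
    not_found = '"{}" not found'.format(name)
    if results['returncode'] != 0 and not_found in results.get('stderr', ''):
        # deleting an object that no longer exists counts as success
        results['returncode'] = 0
    return results

print(normalize_delete_result(
    {'returncode': 1, 'stderr': 'Error from server: routes "foo" not found'}, 'foo'))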
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index b3f01008b..16770b22d 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -273,10 +273,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -286,34 +282,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
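The hunk above is the source of the identical edits to each generated module under roles/lib_openshift/library: the result dict is built once, 'results' defaults to an empty dict for JSON output, JSON is only parsed when there is stdout to parse, and stdout/stderr are attached only on failure or when decoding fails. A condensed sketch of the resulting behaviour, with an illustrative function name and a simplified signature rather than the actual OpenShiftCLI method:

import json

def build_rval(cmds, returncode, stdout, stderr,
               output=True, output_type='json', verbose=False):
    rval = {"returncode": returncode, "cmd": ' '.join(cmds)}

    if output_type == 'json':
        rval['results'] = {}  # default is now a dict, not an empty string
        if output and stdout:
            try:
                rval['results'] = json.loads(stdout)
            except ValueError as verr:
                if "No JSON object could be decoded" in verr.args:
                    rval['err'] = verr.args
    elif output_type == 'raw':
        rval['results'] = stdout if output else ''

    if verbose:
        print("STDOUT: {0}".format(stdout))
        print("STDERR: {0}".format(stderr))

    # raw command output is only kept when something went wrong
    if 'err' in rval or returncode != 0:
        rval.update({"stderr": stderr, "stdout": stdout})

    return rval

Keeping stdout/stderr out of successful results trims the data returned to Ansible on the happy path while still surfacing full command output for debugging failures.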
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index d09358bee..3a866cedf 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,14 +1,24 @@
---
-- name: Install docker excluder
- package:
- name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ r_openshift_excluder_docker_package_state }}"
- when:
- - r_openshift_excluder_enable_docker_excluder | bool
-
-- name: Install openshift excluder
- package:
- name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ r_openshift_excluder_package_state }}"
- when:
- - r_openshift_excluder_enable_openshift_excluder | bool
+
+- when:
+ - not openshift.common.is_atomic | bool
+ - r_openshift_excluder_install_ran is not defined
+
+ block:
+
+ - name: Install docker excluder
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_docker_package_state }}"
+ when:
+ - r_openshift_excluder_enable_docker_excluder | bool
+
+ - name: Install openshift excluder
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_package_state }}"
+ when:
+ - r_openshift_excluder_enable_openshift_excluder | bool
+
+ - set_fact:
+ r_openshift_excluder_install_ran: True
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 1b9bda67e..50ed3e964 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -24,12 +24,18 @@
msg: |
openshift-ansible requires Python 3 for {{ ansible_distribution }};
For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
- when: ansible_distribution == 'Fedora' and ansible_python['version']['major'] != 3
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_python['version']['major'] != 3
+ - r_openshift_facts_ran is not defined
- name: Validate python version
fail:
msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
- when: ansible_distribution != 'Fedora' and ansible_python['version']['major'] != 2
+ when:
+ - ansible_distribution != 'Fedora'
+ - ansible_python['version']['major'] != 2
+ - r_openshift_facts_ran is not defined
# Fail as early as possible if Atomic and old version of Docker
- block:
@@ -48,7 +54,9 @@
that:
- l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
- when: l_is_atomic | bool
+ when:
+ - l_is_atomic | bool
+ - r_openshift_facts_ran is not defined
- name: Load variables
include_vars: "{{ item }}"
@@ -59,7 +67,9 @@
- name: Ensure various deps are installed
package: name={{ item }} state=present
with_items: "{{ required_packages }}"
- when: not l_is_atomic | bool
+ when:
+ - not l_is_atomic | bool
+ - r_openshift_facts_ran is not defined
- name: Ensure various deps for running system containers are installed
package: name={{ item }} state=present
@@ -67,6 +77,7 @@
when:
- not l_is_atomic | bool
- l_any_system_container | bool
+ - r_openshift_facts_ran is not defined
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
@@ -99,3 +110,7 @@
- name: Set repoquery command
set_fact:
repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+- name: Register that this already ran
+ set_fact:
+ r_openshift_facts_ran: True
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 573cbdd09..3c343c9dc 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -87,7 +87,7 @@ openshift_logging_es_cpu_limit: null
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: null
+openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default(null) }}"
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
@@ -126,7 +126,7 @@ openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
-openshift_logging_es_ops_pv_selector: None
+openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default(null) }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index 84503217b..1f10de4a2 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -68,6 +68,9 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_metrics_resolution`: How often metrics should be gathered.
+- `openshift_metrics_install_hawkular_agent`: Install the Hawkular OpenShift Agent (HOSA). HOSA can be used
+ to collect custom metrics from your pods. This component is currently in tech-preview and is not installed by default.
+
## Additional variables to control resource limits
Each metrics component (hawkular, cassandra, heapster) can specify a cpu and memory limits and requests by setting
the corresponding role variable:
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 1d3db8a1a..ba50566e9 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -16,6 +16,7 @@ openshift_metrics_hawkular_nodeselector: ""
openshift_metrics_cassandra_replicas: 1
openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
+openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default(null) }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
@@ -30,6 +31,14 @@ openshift_metrics_heapster_requests_memory: 0.9375G
openshift_metrics_heapster_requests_cpu: null
openshift_metrics_heapster_nodeselector: ""
+openshift_metrics_install_hawkular_agent: False
+openshift_metrics_hawkular_agent_limits_memory: null
+openshift_metrics_hawkular_agent_limits_cpu: null
+openshift_metrics_hawkular_agent_requests_memory: null
+openshift_metrics_hawkular_agent_requests_cpu: null
+openshift_metrics_hawkular_agent_nodeselector: ""
+openshift_metrics_hawkular_agent_namespace: "default"
+
openshift_metrics_hawkular_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}"
openshift_metrics_duration: 7
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index fb4fe2f03..7b81b3c10 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -73,6 +73,8 @@
{{ hawkular_secrets['hawkular-metrics.key'] }}
tls.truststore.crt: >
{{ hawkular_secrets['hawkular-cassandra.crt'] }}
+ ca.crt: >
+ {{ hawkular_secrets['ca.crt'] }}
when: name not in metrics_secrets.stdout_lines
changed_when: no
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 0b6fe7c9e..62b7f52cb 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -23,7 +23,7 @@
changed_when: false
- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
- when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''
+ when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
- name: generate hawkular-cassandra persistent volume claims
template:
@@ -35,6 +35,7 @@
metrics-infra: hawkular-cassandra
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when:
- openshift_metrics_cassandra_storage_type != 'emptydir'
@@ -53,6 +54,7 @@
volume.beta.kubernetes.io/storage-class: dynamic
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
changed_when: false
diff --git a/roles/openshift_metrics/tasks/install_hosa.yaml b/roles/openshift_metrics/tasks/install_hosa.yaml
new file mode 100644
index 000000000..cc533a68b
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install_hosa.yaml
@@ -0,0 +1,44 @@
+---
+- name: Generate Hawkular Agent (HOSA) Cluster Role
+ template:
+ src: hawkular_openshift_agent_role.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-role.yaml"
+ changed_when: no
+
+- name: Generate Hawkular Agent (HOSA) Service Account
+ template:
+ src: hawkular_openshift_agent_sa.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-sa.yaml"
+ changed_when: no
+
+- name: Generate Hawkular Agent (HOSA) Daemon Set
+ template:
+ src: hawkular_openshift_agent_ds.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-ds.yaml"
+ vars:
+ node_selector: "{{openshift_metrics_hawkular_agent_nodeselector | default('') }}"
+ changed_when: no
+
+- name: Generate the Hawkular Agent (HOSA) Configmap
+ template:
+ src: hawkular_openshift_agent_cm.j2
+ dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-cm.yaml"
+ changed_when: no
+
+- name: Generate role binding for the hawkular-openshift-agent service account
+ template:
+ src: rolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/metrics-hawkular-agent-rolebinding.yaml"
+ vars:
+ cluster: True
+ obj_name: hawkular-openshift-agent-rb
+ labels:
+ metrics-infra: hawkular-agent
+ roleRef:
+ kind: ClusterRole
+ name: hawkular-openshift-agent
+ subjects:
+ - kind: ServiceAccount
+ name: hawkular-openshift-agent
+ namespace: "{{openshift_metrics_hawkular_agent_namespace}}"
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 74eb56713..fdf4ae57f 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -16,11 +16,19 @@
include: install_heapster.yaml
when: openshift_metrics_heapster_standalone | bool
-- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+- name: Install Hawkular OpenShift Agent (HOSA)
+ include: install_hosa.yaml
+ when: openshift_metrics_install_hawkular_agent | default(false) | bool
+
+- find:
+ paths: "{{ mktemp.stdout }}/templates"
+ patterns: "^(?!metrics-hawkular-openshift-agent).*.yaml"
+ use_regex: true
register: object_def_files
changed_when: no
-- slurp: src={{item.path}}
+- slurp:
+ src: "{{item.path}}"
register: object_defs
with_items: "{{object_def_files.files}}"
changed_when: no
@@ -34,6 +42,31 @@
file_content: "{{ item.content | b64decode | from_yaml }}"
with_items: "{{ object_defs.results }}"
+- find:
+ paths: "{{ mktemp.stdout }}/templates"
+ patterns: "^metrics-hawkular-openshift-agent.*.yaml"
+ use_regex: true
+ register: hawkular_agent_object_def_files
+ when: openshift_metrics_install_hawkular_agent | bool
+ changed_when: no
+
+- slurp:
+ src: "{{item.path}}"
+ register: hawkular_agent_object_defs
+ with_items: "{{ hawkular_agent_object_def_files.files }}"
+ when: openshift_metrics_install_hawkular_agent | bool
+ changed_when: no
+
+- name: Create Hawkular Agent objects
+ include: oc_apply.yaml
+ vars:
+ kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ namespace: "{{ openshift_metrics_hawkular_agent_namespace }}"
+ file_name: "{{ item.source }}"
+ file_content: "{{ item.content | b64decode | from_yaml }}"
+ with_items: "{{ hawkular_agent_object_defs.results }}"
+ when: openshift_metrics_install_hawkular_agent | bool
+
- include: update_master_config.yaml
- command: >
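The two regex-based find tasks above split the generated templates into two sets: everything except the HOSA templates is applied to the metrics project as before, while files matching the metrics-hawkular-openshift-agent prefix are applied separately to openshift_metrics_hawkular_agent_namespace. A small illustration of how the patterns partition a directory listing (hypothetical file names; the real tasks use Ansible's find module with use_regex):

import re

templates = [
    "metrics-hawkular-openshift-agent-ds.yaml",
    "metrics-hawkular-openshift-agent-cm.yaml",
    "metrics-heapster.yaml",
    "hawkular-cassandra-pvc-1.yaml",
]

generic = [t for t in templates
           if re.match(r"^(?!metrics-hawkular-openshift-agent).*.yaml", t)]
hosa = [t for t in templates
        if re.match(r"^metrics-hawkular-openshift-agent.*.yaml", t)]

print(generic)  # applied to the metrics project
print(hosa)     # applied to the HOSA namespace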
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 5d8506a73..0b5f23c24 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -44,6 +44,9 @@
- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}"
+- include: uninstall_hosa.yaml
+ when: not openshift_metrics_install_hawkular_agent | bool
+
- name: Delete temp directory
local_action: file path=local_tmp.stdout state=absent
tags: metrics_cleanup
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index dd67703b4..1e1af40e8 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -14,7 +14,7 @@
command: >
{{ openshift.common.client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
- -n {{ openshift_metrics_project }}
+ -n {{namespace}}
register: generation_apply
failed_when: "'error' in generation_apply.stderr"
changed_when: no
diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
new file mode 100644
index 000000000..42ed02460
--- /dev/null
+++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
@@ -0,0 +1,15 @@
+---
+- name: remove Hawkular Agent (HOSA) components
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found --selector=metrics-infra=agent
+ all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
+ register: delete_metrics
+ changed_when: delete_metrics.stdout != 'No resources found'
+
+- name: remove rolebindings
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found
+ clusterrolebinding/hawkular-openshift-agent-rb
+ changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2
new file mode 100644
index 000000000..bf472c066
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2
@@ -0,0 +1,54 @@
+id: hawkular-openshift-agent
+kind: ConfigMap
+apiVersion: v1
+name: Hawkular OpenShift Agent Configuration
+metadata:
+ name: hawkular-openshift-agent-configuration
+ labels:
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
+data:
+ config.yaml: |
+ kubernetes:
+ tenant: ${POD:namespace_name}
+ hawkular_server:
+ url: https://hawkular-metrics.openshift-infra.svc.cluster.local
+ credentials:
+ username: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.username
+ password: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.password
+ ca_cert_file: secret:openshift-infra/hawkular-metrics-certs/ca.crt
+ emitter:
+ status_enabled: false
+ collector:
+ minimum_collection_interval: 10s
+ default_collection_interval: 30s
+ metric_id_prefix: pod/${POD:uid}/custom/
+ tags:
+ metric_name: ${METRIC:name}
+ description: ${METRIC:description}
+ units: ${METRIC:units}
+ namespace_id: ${POD:namespace_uid}
+ namespace_name: ${POD:namespace_name}
+ node_name: ${POD:node_name}
+ pod_id: ${POD:uid}
+ pod_ip: ${POD:ip}
+ pod_name: ${POD:name}
+ pod_namespace: ${POD:namespace_name}
+ hostname: ${POD:hostname}
+ host_ip: ${POD:host_ip}
+ labels: ${POD:labels}
+ type: pod
+ collector: hawkular_openshift_agent
+ custom_metric: true
+ hawkular-openshift-agent: |
+ endpoints:
+ - type: prometheus
+ protocol: "http"
+ port: 8080
+ path: /metrics
+ collection_interval: 30s
+ metrics:
+ - name: hawkular_openshift_agent_metric_data_points_collected_total
+ - name: hawkular_openshift_agent_monitored_endpoints
+ - name: hawkular_openshift_agent_monitored_pods
+ - name: hawkular_openshift_agent_monitored_metrics
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
new file mode 100644
index 000000000..d65eaf9ae
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
@@ -0,0 +1,91 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ name: hawkular-openshift-agent
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
+spec:
+ selector:
+ matchLabels:
+ name: hawkular-openshift-agent
+ template:
+ metadata:
+ labels:
+ name: hawkular-openshift-agent
+ metrics-infra: agent
+ spec:
+ serviceAccount: hawkular-openshift-agent
+{% if node_selector is iterable and node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - image: {{openshift_metrics_image_prefix}}metrics-hawkular-openshift-agent:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
+ name: hawkular-openshift-agent
+{% if ((openshift_metrics_hawkular_agent_limits_cpu is defined and openshift_metrics_hawkular_agent_limits_cpu is not none)
+ or (openshift_metrics_hawkular_agent_limits_memory is defined and openshift_metrics_hawkular_agent_limits_memory is not none)
+ or (openshift_metrics_hawkular_agent_requests_cpu is defined and openshift_metrics_hawkular_agent_requests_cpu is not none)
+ or (openshift_metrics_hawkular_agent_requests_memory is defined and openshift_metrics_hawkular_agent_requests_memory is not none))
+%}
+ resources:
+{% if (openshift_metrics_hawkular_agent_limits_cpu is not none
+ or openshift_metrics_hawkular_agent_limits_memory is not none)
+%}
+ limits:
+{% if openshift_metrics_hawkular_agent_limits_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_agent_limits_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_agent_limits_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_agent_limits_memory}}"
+{% endif %}
+{% endif %}
+{% if (openshift_metrics_hawkular_agent_requests_cpu is not none
+ or openshift_metrics_hawkular_agent_requests_memory is not none)
+%}
+ requests:
+{% if openshift_metrics_hawkular_agent_requests_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_agent_requests_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_agent_requests_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_agent_requests_memory}}"
+{% endif %}
+{% endif %}
+{% endif %}
+
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /health
+ port: 8080
+ initialDelaySeconds: 30
+ periodSeconds: 30
+ command:
+ - "hawkular-openshift-agent"
+ - "-config"
+ - "/hawkular-openshift-agent-configuration/config.yaml"
+ - "-v"
+ - "3"
+ env:
+ - name: K8S_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ volumeMounts:
+ - name: hawkular-openshift-agent-configuration
+ mountPath: "/hawkular-openshift-agent-configuration"
+ volumes:
+ - name: hawkular-openshift-agent-configuration
+ configMap:
+ name: hawkular-openshift-agent-configuration
+ - name: hawkular-openshift-agent
+ configMap:
+ name: hawkular-openshift-agent-configuration
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2
new file mode 100644
index 000000000..24b8cd801
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ metrics-infra: agent
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - namespaces
+ - nodes
+ - pods
+ - projects
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2
new file mode 100644
index 000000000..ec604d73c
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: hawkular-openshift-agent
+ labels:
+ metrics-infra: agent
+ namespace: {{openshift_metrics_hawkular_agent_namespace}}
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index c2e56ba21..0b801b33f 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -18,6 +18,13 @@ metadata:
{% endfor %}
{% endif %}
spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
accessModes:
{% for mode in access_modes %}
- {{ mode }}
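The new pv_selector block in pvc.j2 renders an optional matchLabels selector so a claim is only bound to persistent volumes carrying the requested labels (for example, labels passed through openshift_hosted_metrics_storage_labels). A quick way to see the rendered output, using the jinja2 library directly against a simplified excerpt; iteritems() in the real template is the Python 2 dict API and is swapped for items() here so the sketch runs under Python 3:

from jinja2 import Template

snippet = """\
spec:
{% if pv_selector is defined and pv_selector is mapping %}
  selector:
    matchLabels:
{% for key, value in pv_selector.items() %}
      {{ key }}: {{ value }}
{% endfor %}
{% endif %}
  accessModes:
"""

print(Template(snippet, trim_blocks=True).render(pv_selector={"storage": "metrics"}))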
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index f397cbbf1..8bae9aaac 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -1,3 +1,5 @@
no-resolv
domain-needed
server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }}
+no-negcache
+max-cache-ttl=1
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index 877e88002..9c5103597 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -7,6 +7,12 @@ items:
kind: PersistentVolume
metadata:
name: "{{ volume.name }}"
+{% if volume.labels is defined and volume.labels is mapping %}
+ labels:
+{% for key,value in volume.labels.iteritems() %}
+ {{ key }}: {{ value }}
+{% endfor %}
+{% endif %}
spec:
capacity:
storage: "{{ volume.capacity }}"
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 023b1a9b7..8f8550e2d 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -4,7 +4,8 @@
path: /run/ostree-booted
register: ostree_booted
-- block:
+- when: not ostree_booted.stat.exists
+ block:
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
@@ -24,41 +25,40 @@
- openshift_additional_repos | length == 0
notify: refresh cache
- # Note: OpenShift repositories under CentOS may be shipped through the
- # "centos-release-openshift-origin" package which configures the repository.
- # This task matches the file names provided by the package so that they are
- # not installed twice in different files and remains idempotent.
- - name: Configure origin gpg keys if needed
- copy:
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- with_items:
- - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
- dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
- - src: origin/repos/openshift-ansible-centos-paas-sig.repo
- dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
- notify: refresh cache
- when:
- - ansible_os_family == "RedHat"
- - ansible_distribution != "Fedora"
- - openshift_deployment_type == 'origin'
- - openshift_enable_origin_repo | default(true) | bool
-
# Singleton block
- - when: r_osr_first_run | default(true)
+ - when: r_openshift_repos_has_run is not defined
block:
+
+ # Note: OpenShift repositories under CentOS may be shipped through the
+ # "centos-release-openshift-origin" package which configures the repository.
+ # This task matches the file names provided by the package so that they are
+ # not installed twice in different files and remains idempotent.
+ - name: Configure origin gpg keys if needed
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
+ dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+ - src: origin/repos/openshift-ansible-centos-paas-sig.repo
+ dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
+ notify: refresh cache
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution != "Fedora"
+ - openshift_deployment_type == 'origin'
+ - openshift_enable_origin_repo | default(true) | bool
+
- name: Ensure clean repo cache in the event repos have been changed manually
debug:
msg: "First run of openshift_repos"
changed_when: true
notify: refresh cache
- - name: Set fact r_osr_first_run false
+ - name: Record that openshift_repos already ran
set_fact:
- r_osr_first_run: false
+ r_openshift_repos_has_run: True
# Force running ALL handlers now, because we expect repo cache to be cleared
# if changes have been made.
- meta: flush_handlers
-
- when: not ostree_booted.stat.exists
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 7b310dbf8..62fc35299 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -1,7 +1,31 @@
OpenShift GlusterFS Cluster
===========================
-OpenShift GlusterFS Cluster Installation
+OpenShift GlusterFS Cluster Configuration
+
+This role handles the configuration of GlusterFS clusters. It can handle
+two primary configuration scenarios:
+
+* Configuring a new, natively-hosted GlusterFS cluster. In this scenario,
+ GlusterFS pods are deployed on nodes in the OpenShift cluster which are
+ configured to provide storage.
+* Configuring a new, external GlusterFS cluster. In this scenario, the
+ cluster nodes have the GlusterFS software pre-installed but have not
+ been configured yet. The installer will take care of configuring the
+ cluster(s) for use by OpenShift applications.
+* Using existing GlusterFS clusters. In this scenario, one or more
+ GlusterFS clusters are assumed to be already setup. These clusters can
+ be either natively-hosted or external, but must be managed by a
+ [heketi service](https://github.com/heketi/heketi).
+
+As part of the configuration, a particular GlusterFS cluster may be
+specified to provide backend storage for a natively-hosted Docker
+registry.
+
+Unless configured otherwise, a StorageClass will be automatically
+created for each non-registry GlusterFS cluster. This will allow
+applications which can mount PersistentVolumes to request
+dynamically-provisioned GlusterFS volumes.
Requirements
------------
@@ -21,26 +45,50 @@ hosted Docker registry:
* `[glusterfs_registry]`
+Host Variables
+--------------
+
+For configuring new clusters, the following role variables are available.
+
+Each host in either of the above groups must have the following variable
+defined:
+
+| Name | Default value | Description |
+|-------------------|---------------|-----------------------------------------|
+| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]'
+
+In addition, each host may specify the following variables to further control
+their configuration as GlusterFS nodes:
+
+| Name | Default value | Description |
+|--------------------|---------------------------|-----------------------------------------|
+| glusterfs_cluster | 1 | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
+| glusterfs_hostname | openshift.common.hostname | A hostname (or IP address) that will be used for internal GlusterFS communication
+| glusterfs_ip | openshift.common.ip | An IP address that will be used by pods to communicate with the GlusterFS node
+| glusterfs_zone | 1 | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volume's bricks as evenly as possible across all zones
+
Role Variables
--------------
This role has the following variables that control the integration of a
GlusterFS cluster into a new or existing OpenShift cluster:
-| Name | Default value | |
+| Name | Default value | Description |
|--------------------------------------------------|-------------------------|-----------------------------------------|
| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources
| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
-| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode
+| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
-| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin
-| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
@@ -52,17 +100,24 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | |
-|---------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
-| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters
+| Name | Default value | Description |
+|-------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
Additionally, this role's behavior responds to the following registry-specific
-variable:
-
-| Name | Default value | Description |
-|----------------------------------------------|---------------|------------------------------------------------------------------------------|
-| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+variables:
+
+| Name | Default value | Description |
+|-----------------------------------------------|------------------------------|-----------------------------------------|
+| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
+| openshift_hosted_registry_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage
+| openshift_hosted_registry_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume
+| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
Dependencies
------------
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index ebe9ca30b..468877e57 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -2,7 +2,9 @@
openshift_storage_glusterfs_timeout: 300
openshift_storage_glusterfs_namespace: 'default'
openshift_storage_glusterfs_is_native: True
-openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
+openshift_storage_glusterfs_name: 'storage'
+openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
+openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_wipe: False
@@ -11,8 +13,8 @@ openshift_storage_glusterfs_heketi_is_missing: True
openshift_storage_glusterfs_heketi_deploy_is_missing: True
openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
openshift_storage_glusterfs_heketi_version: 'latest'
-openshift_storage_glusterfs_heketi_admin_key: ''
-openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
openshift_storage_glusterfs_heketi_topology_load: True
openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
openshift_storage_glusterfs_heketi_url: "{{ omit }}"
@@ -20,7 +22,9 @@ openshift_storage_glusterfs_heketi_url: "{{ omit }}"
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
-openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_name: 'registry'
+openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
+openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
@@ -29,8 +33,8 @@ openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_gl
openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
-openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
-openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ 32 | oo_generate_secret }}"
openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
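With the defaults above, the heketi admin and user keys now default to values produced by the repo's oo_generate_secret filter instead of empty strings, and the registry cluster receives its own independently generated pair. Purely as an illustration of the idea (this is not the filter's implementation), a 32-character random secret could be produced like this:

import random
import string

def generate_secret(length=32):
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))

print(generate_secret(32))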
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index c9945be13..81b4fa5dc 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -9,49 +9,47 @@ metadata:
annotations:
description: Bootstrap Heketi installation
tags: glusterfs,heketi,installation
-labels:
- template: deploy-heketi
objects:
- kind: Service
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-service
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
deploy-heketi: support
annotations:
description: Exposes Heketi service
spec:
ports:
- - name: deploy-heketi
+ - name: deploy-heketi-${CLUSTER_NAME}
port: 8080
targetPort: 8080
selector:
- name: deploy-heketi
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
- kind: Route
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-route
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
deploy-heketi: support
spec:
to:
kind: Service
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-dc
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
deploy-heketi: support
annotations:
description: Defines how to deploy Heketi
spec:
replicas: 1
selector:
- name: deploy-heketi
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
triggers:
- type: ConfigChange
strategy:
@@ -60,13 +58,12 @@ objects:
metadata:
name: deploy-heketi
labels:
- name: deploy-heketi
- glusterfs: deploy-heketi-pod
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
deploy-heketi: support
spec:
- serviceAccountName: heketi-service-account
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
- - name: deploy-heketi
+ - name: heketi
image: ${IMAGE_NAME}:${IMAGE_VERSION}
env:
- name: HEKETI_USER_KEY
@@ -81,11 +78,15 @@ objects:
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
- name: db
mountPath: /var/lib/heketi
+ - name: topology
+ mountPath: ${TOPOLOGY_PATH}
readinessProbe:
timeoutSeconds: 3
initialDelaySeconds: 3
@@ -100,6 +101,9 @@ objects:
port: 8080
volumes:
- name: db
+ - name: topology
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-topology-secret
parameters:
- name: HEKETI_USER_KEY
displayName: Heketi User Secret
@@ -107,9 +111,19 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
- name: IMAGE_NAME
- displayName: GlusterFS container name
+ displayName: heketi container name
required: True
- name: IMAGE_VERSION
- displayName: GlusterFS container versiona
+  displayName: heketi container version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: glusterfs
+- name: TOPOLOGY_PATH
+ displayName: heketi topology file location
required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
index c66705752..dc3d2250a 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -12,24 +12,24 @@ objects:
- kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
- name: glusterfs
+ name: glusterfs-${CLUSTER_NAME}
labels:
- glusterfs: daemonset
+ glusterfs: ${CLUSTER_NAME}-daemonset
annotations:
description: GlusterFS DaemonSet
tags: glusterfs
spec:
selector:
matchLabels:
- glusterfs-node: pod
+ glusterfs: ${CLUSTER_NAME}-pod
template:
metadata:
- name: glusterfs
+ name: glusterfs-${CLUSTER_NAME}
labels:
+ glusterfs: ${CLUSTER_NAME}-pod
glusterfs-node: pod
spec:
- nodeSelector:
- storagenode: glusterfs
+ nodeSelector: "${{NODE_LABELS}}"
hostNetwork: true
containers:
- name: glusterfs
@@ -63,26 +63,26 @@ objects:
privileged: true
readinessProbe:
timeoutSeconds: 3
- initialDelaySeconds: 100
+ initialDelaySeconds: 40
exec:
command:
- "/bin/bash"
- "-c"
- systemctl status glusterd.service
- periodSeconds: 10
+ periodSeconds: 25
successThreshold: 1
- failureThreshold: 3
+ failureThreshold: 15
livenessProbe:
timeoutSeconds: 3
- initialDelaySeconds: 100
+ initialDelaySeconds: 40
exec:
command:
- "/bin/bash"
- "-c"
- systemctl status glusterd.service
- periodSeconds: 10
+ periodSeconds: 25
successThreshold: 1
- failureThreshold: 3
+ failureThreshold: 15
resources: {}
terminationMessagePath: "/dev/termination-log"
volumes:
@@ -120,9 +120,16 @@ objects:
dnsPolicy: ClusterFirst
securityContext: {}
parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+ value: '{ "glusterfs": "storage-host" }'
- name: IMAGE_NAME
displayName: GlusterFS container name
required: True
- name: IMAGE_VERSION
displayName: GlusterFS container versiona
required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
index df045c170..1d8f1abdf 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -8,15 +8,13 @@ metadata:
annotations:
description: Heketi service deployment template
tags: glusterfs,heketi
-labels:
- template: heketi
objects:
- kind: Service
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-service
+ glusterfs: heketi-${CLUSTER_NAME}-service
annotations:
description: Exposes Heketi service
spec:
@@ -25,40 +23,40 @@ objects:
port: 8080
targetPort: 8080
selector:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
- kind: Route
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-route
+ glusterfs: heketi-${CLUSTER_NAME}-route
spec:
to:
kind: Service
- name: heketi
+ name: heketi-${CLUSTER_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-dc
+ glusterfs: heketi-${CLUSTER_NAME}-dc
annotations:
description: Defines how to deploy Heketi
spec:
replicas: 1
selector:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
triggers:
- type: ConfigChange
strategy:
type: Recreate
template:
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
spec:
- serviceAccountName: heketi-service-account
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
- name: heketi
image: ${IMAGE_NAME}:${IMAGE_VERSION}
@@ -76,6 +74,8 @@ objects:
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
@@ -96,7 +96,7 @@ objects:
volumes:
- name: db
glusterfs:
- endpoints: heketi-storage-endpoints
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
path: heketidbstorage
parameters:
- name: HEKETI_USER_KEY
@@ -105,9 +105,16 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
- name: IMAGE_NAME
- displayName: GlusterFS container name
+ displayName: heketi container name
required: True
- name: IMAGE_VERSION
- displayName: GlusterFS container versiona
+  displayName: heketi container version
required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index fa5fa2cb0..829c1f51b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -5,12 +5,6 @@
name: "{{ glusterfs_namespace }}"
when: glusterfs_is_native or glusterfs_heketi_is_native
-- include: glusterfs_deploy.yml
- when: glusterfs_is_native
-
-- name: Make sure heketi-client is installed
- package: name=heketi-client state=present
-
- name: Delete pre-existing heketi resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
@@ -21,12 +15,18 @@
with_items:
- kind: "template,route,service,dc,jobs,secret"
selector: "deploy-heketi"
- - kind: "template,route,service,dc"
- name: "heketi"
- - kind: "svc,ep"
+ - kind: "svc"
name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ - kind: "template,route,service,dc"
+ name: "heketi-{{ glusterfs_name }}"
+ - kind: "svc"
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
- kind: "sa"
- name: "heketi-service-account"
+ name: "heketi-{{ glusterfs_name }}-service-account"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-user-secret"
failed_when: False
when: glusterfs_heketi_wipe
@@ -35,11 +35,11 @@
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=deploy-heketi-pod"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until: "heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
- name: Wait for heketi pods to terminate
@@ -47,23 +47,26 @@
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until: "heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
+- include: glusterfs_deploy.yml
+ when: glusterfs_is_native
+
- name: Create heketi service account
oc_serviceaccount:
namespace: "{{ glusterfs_namespace }}"
- name: heketi-service-account
+ name: "heketi-{{ glusterfs_name }}-service-account"
state: present
when: glusterfs_heketi_is_native
- name: Add heketi service account to privileged SCC
oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: scc
resource_name: privileged
state: present
@@ -71,7 +74,7 @@
- name: Allow heketi service account to view/edit pods
oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: role
resource_name: edit
state: present
@@ -82,7 +85,7 @@
namespace: "{{ glusterfs_namespace }}"
state: list
kind: pod
- selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
when: glusterfs_heketi_is_native
@@ -100,7 +103,7 @@
namespace: "{{ glusterfs_namespace }}"
state: list
kind: pod
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
when: glusterfs_heketi_is_native
@@ -113,48 +116,35 @@
# heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_topology_load
+
- include: heketi_deploy_part1.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_deploy_is_missing
- glusterfs_heketi_is_missing
-- name: Determine heketi URL
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- state: list
- kind: ep
- selector: "glusterfs in (deploy-heketi-service, heketi-service)"
- register: heketi_url
- until:
- - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
- delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
- when:
- - glusterfs_heketi_is_native
- - glusterfs_heketi_url is undefined
-
- name: Set heketi URL
set_fact:
- glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ glusterfs_heketi_url: "localhost:8080"
when:
- glusterfs_heketi_is_native
- - glusterfs_heketi_url is undefined
+
+- name: Set heketi-cli command
+ set_fact:
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
-- name: Generate topology file
- template:
- src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
- dest: "{{ mktemp.stdout }}/topology.json"
- when:
- - glusterfs_heketi_topology_load
-
- name: Load heketi topology
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
register: topology_load
failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
when:
@@ -164,3 +154,29 @@
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_is_missing
+
+- name: Create heketi user secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-user-secret"
+ type: "kubernetes.io/glusterfs"
+ force: True
+ contents:
+ - path: key
+ data: "{{ glusterfs_heketi_user_key }}"
+
+- name: Generate GlusterFS StorageClass file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+
+- name: Create GlusterFS StorageClass
+ oc_obj:
+ state: present
+ kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}"
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+ when:
+ - glusterfs_storageclass
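The heketi-cli calls in this file are now built from the glusterfs_heketi_client fact. A minimal sketch of how the "Verify heketi service" task expands when heketi runs natively in the cluster (the pod name and admin key are hypothetical):

# Hypothetical expansion of the templated client fact:
- name: Verify heketi service
  command: "oc rsh heketi-storage-1-abcd1 heketi-cli -s http://localhost:8080 --user admin --secret 'adminkey' cluster list"
  changed_when: False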
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 451990240..aa303d126 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -3,7 +3,9 @@
glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}"
- glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}"
@@ -17,6 +19,6 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
- glusterfs_nodes: "{{ g_glusterfs_hosts }}"
+ glusterfs_nodes: "{{ groups.glusterfs }}"
- include: glusterfs_common.yml
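The new nodeselector default joins 'storagenode' with the cluster name and feeds the pair through map_from_pairs. A sketch of the resulting fact, assuming a cluster name of storage and no explicit nodeselector override:

# Hypothetical result of the default expression with openshift_storage_glusterfs_name=storage:
glusterfs_nodeselector:
  storagenode: storage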
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 579112349..ea4dcc510 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -1,23 +1,24 @@
---
- assert:
- that: "glusterfs_nodeselector.keys() | count == 1"
- msg: Only one GlusterFS nodeselector key pair should be provided
-
-- assert:
that: "glusterfs_nodes | count >= 3"
msg: There must be at least three GlusterFS nodes specified
- name: Delete pre-existing GlusterFS resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
- kind: "template,daemonset"
- name: glusterfs
+ kind: "{{ item.kind }}"
+ name: "{{ item.name }}"
state: absent
+ with_items:
+ - kind: template
+ name: glusterfs
+ - kind: daemonset
+ name: "glusterfs-{{ glusterfs_name }}"
when: glusterfs_wipe
- name: Unlabel any existing GlusterFS nodes
oc_label:
- name: "{{ item }}"
+ name: "{{ hostvars[item].openshift.common.hostname }}"
kind: node
state: absent
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
@@ -40,11 +41,16 @@
failed_when: False
when: glusterfs_wipe
- # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+ # Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
- name: Clear GlusterFS storage device contents
- shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
delegate_to: "{{ item.item }}"
with_items: "{{ devices_info.results }}"
+ register: clear_devices
+ until:
+ - "'contains a filesystem in use' not in clear_devices.stderr"
+ delay: 1
+ retries: 30
when:
- glusterfs_wipe
- item.stdout_lines | count > 0
@@ -61,13 +67,11 @@
- name: Label GlusterFS nodes
oc_label:
- name: "{{ glusterfs_host }}"
+ name: "{{ hostvars[item].openshift.common.hostname }}"
kind: node
state: add
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ glusterfs_nodes | default([]) }}"
- loop_control:
- loop_var: glusterfs_host
- name: Copy GlusterFS DaemonSet template
copy:
@@ -78,7 +82,7 @@
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: glusterfs
+ name: "glusterfs"
state: present
files:
- "{{ mktemp.stdout }}/glusterfs-template.yml"
@@ -91,17 +95,19 @@
params:
IMAGE_NAME: "{{ glusterfs_image }}"
IMAGE_VERSION: "{{ glusterfs_version }}"
+ NODE_LABELS: "{{ glusterfs_nodeselector }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Wait for GlusterFS pods
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs-node=pod"
+ selector: "glusterfs={{ glusterfs_name }}-pod"
register: glusterfs_pods
until:
- "glusterfs_pods.results.results[0]['items'] | count > 0"
# There must be as many pods with 'Ready' status True as there are nodes expecting those pods
- "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
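The revised wipe command removes logical volumes before tearing down the volume group and physical volume. A sketch of what the templated loop generates for a single hypothetical pvs line "/dev/sdc vg_abc123":

# Hypothetical expansion for one device that belongs to a volume group:
- name: Clear GlusterFS storage device contents
  shell: "lvremove -ff vg_abc123; vgremove -fy vg_abc123; pvremove -fy /dev/sdc; "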
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 392f4b65b..4c6891eeb 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -3,7 +3,9 @@
glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
- glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}"
@@ -17,21 +19,22 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
- glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}"
+ glusterfs_nodes: "{{ groups.glusterfs_registry }}"
- include: glusterfs_common.yml
- when: g_glusterfs_registry_hosts != g_glusterfs_hosts
+ when:
+ - groups.glusterfs_registry | default([]) | count > 0
+ - "'glusterfs' not in groups or groups.glusterfs_registry != groups.glusterfs"
- name: Delete pre-existing GlusterFS registry resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: "{{ item.kind }}"
- name: "{{ item.name | default(omit) }}"
- selector: "{{ item.selector | default(omit) }}"
+ name: "{{ item.name }}"
state: absent
with_items:
- - kind: "svc,ep"
- name: "glusterfs-registry-endpoints"
+ - kind: "svc"
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
failed_when: False
- name: Generate GlusterFS registry endpoints
@@ -40,8 +43,8 @@
dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
- name: Copy GlusterFS registry service
- copy:
- src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml.j2"
dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
- name: Create GlusterFS registry endpoints
@@ -49,7 +52,7 @@
namespace: "{{ glusterfs_namespace }}"
state: present
kind: endpoints
- name: glusterfs-registry-endpoints
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
files:
- "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
@@ -58,14 +61,14 @@
namespace: "{{ glusterfs_namespace }}"
state: present
kind: service
- name: glusterfs-registry-endpoints
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
files:
- "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
- name: Check if GlusterFS registry volume exists
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list"
+ command: "{{ glusterfs_heketi_client }} volume list"
register: registry_volume
- name: Create GlusterFS registry volume
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index c14fcfb15..318d34b5d 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -6,11 +6,21 @@
with_items:
- "deploy-heketi-template.yml"
-- name: Create deploy-heketi resources
+- name: Create heketi topology secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ force: True
+ files:
+ - name: topology.json
+ path: "{{ mktemp.stdout }}/topology.json"
+
+- name: Create deploy-heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: deploy-heketi
+ name: "deploy-heketi"
state: present
files:
- "{{ mktemp.stdout }}/deploy-heketi-template.yml"
@@ -25,17 +35,20 @@
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+ TOPOLOGY_PATH: "{{ mktemp.stdout }}"
- name: Wait for deploy-heketi pod
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 64410a9ab..3a9619d9d 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,8 +1,10 @@
---
- name: Create heketi DB volume
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
register: setup_storage
- failed_when: False
+
+- name: Copy heketi-storage list
+ shell: "{{ openshift.common.client_binary }} rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
@@ -28,7 +30,7 @@
# Pod's 'Complete' status must be True
- "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
failed_when:
- "'results' in heketi_job.results"
- "heketi_job.results.results | count > 0"
@@ -46,14 +48,45 @@
with_items:
- kind: "template,route,service,jobs,dc,secret"
selector: "deploy-heketi"
- failed_when: False
+ - kind: "svc"
+ name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+
+- name: Generate heketi endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Generate heketi service
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-service.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-service.yml"
+
+- name: Create heketi endpoints
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: endpoints
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Create heketi service
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: service
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-service.yml"
- name: Copy heketi template
copy:
src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
dest: "{{ mktemp.stdout }}/heketi-template.yml"
-- name: Create heketi resources
+- name: Create heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
@@ -72,38 +105,27 @@
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Wait for heketi pod
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
-
-- name: Determine heketi URL
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- state: list
- kind: ep
- selector: "glusterfs=heketi-service"
- register: heketi_url
- until:
- - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
- delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
-- name: Set heketi URL
+- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
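The DB listfile is now written inside the deploy-heketi pod and copied back over rsh. A sketch of how the copy step might expand (the pod name and temp directory are hypothetical, and client_binary is assumed to resolve to oc):

# Hypothetical expansion of the "Copy heketi-storage list" task:
- name: Copy heketi-storage list
  shell: "oc rsh deploy-heketi-storage-1-abcd1 cat /tmp/heketi-storage.json > /tmp/openshift-glusterfs-ansible-XXXXXX/heketi-storage.json"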
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index ebd8db453..c9bfdd1cd 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -7,12 +7,11 @@
- include: glusterfs_config.yml
when:
- - g_glusterfs_hosts | default([]) | count > 0
+ - groups.glusterfs | default([]) | count > 0
- include: glusterfs_registry.yml
when:
- - g_glusterfs_registry_hosts | default([]) | count > 0
- - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
+ - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
- name: Delete temp directory
file:
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
index 605627ab5..11c9195bb 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -1,7 +1,8 @@
+---
apiVersion: v1
kind: Endpoints
metadata:
- name: glusterfs-registry-endpoints
+ name: glusterfs-{{ glusterfs_name }}-endpoints
subsets:
- addresses:
{% for node in glusterfs_nodes %}
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
index 3f8d8f507..3f869d2b7 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: glusterfs-registry-endpoints
+ name: glusterfs-{{ glusterfs_name }}-endpoints
spec:
ports:
- port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..9b8fae310
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{{ glusterfs_heketi_url }}:8081"
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-user-secret"
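A hypothetical claim that consumes the generated class, assuming glusterfs_name is storage (names and size are examples only):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-claim-example
spec:
  storageClassName: glusterfs-storage
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi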
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
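For reference, a sketch of how this template might render for two nodes with glusterfs_name=storage (the addresses are documentation-range examples):

---
apiVersion: v1
kind: Endpoints
metadata:
  name: heketi-db-storage-endpoints
subsets:
- addresses:
  - ip: 192.0.2.11
  - ip: 192.0.2.12
  ports:
  - port: 1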
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}