Diffstat (limited to 'roles')
-rw-r--r--  roles/calico/defaults/main.yaml | 8
-rw-r--r--  roles/calico_master/defaults/main.yaml | 1
-rw-r--r--  roles/calico_master/templates/calico-policy-controller.yml.j2 | 2
-rw-r--r--  roles/docker/README.md | 2
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 10
-rw-r--r--  roles/etcd/tasks/main.yml | 3
-rw-r--r--  roles/etcd/tasks/system_container.yml | 52
-rw-r--r--  roles/etcd_common/defaults/main.yml | 16
-rw-r--r--  roles/etcd_common/tasks/backup.yml (renamed from roles/etcd_upgrade/tasks/backup.yml) | 33
-rw-r--r--  roles/etcd_common/tasks/drop_etcdctl.yml (renamed from roles/etcd_common/tasks/etcdctl.yml) | 0
-rw-r--r--  roles/etcd_common/tasks/main.yml | 9
-rw-r--r--  roles/etcd_server_certificates/tasks/main.yml | 37
-rw-r--r--  roles/etcd_upgrade/defaults/main.yml | 6
-rw-r--r--  roles/etcd_upgrade/meta/main.yml | 1
-rw-r--r--  roles/etcd_upgrade/tasks/main.yml | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 54
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 58
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 44
-rw-r--r--  roles/lib_openshift/src/class/oc_obj.py | 10
-rw-r--r--  roles/lib_openshift/src/class/oc_secret.py | 14
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 44
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_secret.py | 1
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 32
-rw-r--r--  roles/openshift_ca/vars/main.yml | 3
-rw-r--r--  roles/openshift_default_storage_class/README.md | 39
-rw-r--r--  roles/openshift_default_storage_class/defaults/main.yml | 14
-rw-r--r--  roles/openshift_default_storage_class/meta/main.yml | 15
-rw-r--r--  roles/openshift_default_storage_class/tasks/main.yml | 19
-rw-r--r--  roles/openshift_default_storage_class/vars/main.yml | 1
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json | 28
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json | 20
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 12
-rw-r--r--  roles/openshift_health_checker/library/ocutil.py | 74
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 22
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 22
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py | 185
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/__init__.py | 0
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/curator.py | 61
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 217
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/fluentd.py | 170
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/kibana.py | 229
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py | 96
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 42
-rw-r--r--  roles/openshift_health_checker/test/curator_test.py | 68
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 24
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py | 224
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py | 180
-rw-r--r--  roles/openshift_health_checker/test/fluentd_test.py | 109
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py | 218
-rw-r--r--  roles/openshift_health_checker/test/logging_check_test.py | 137
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml | 16
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 3
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 7
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 7
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 60
-rw-r--r--  roles/openshift_logging_kibana/templates/oauth-client.j2 | 6
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 6
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 1
-rw-r--r--  roles/openshift_metrics/tasks/generate_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/setup_certificate.yaml | 2
-rw-r--r--  roles/openshift_metrics/templates/pvc.j2 | 7
-rw-r--r--  roles/openshift_node/handlers/main.yml | 11
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 17
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/rpm_upgrade.yml | 15
l---------  roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2 | 1
l---------  roles/openshift_node_upgrade/templates/origin-node.service.j2 | 1
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 6
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 83
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 16
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml | 50
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml | 31
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml | 41
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 108
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 36
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 29
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 21
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 68
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 5
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 3
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2 (renamed from roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml) | 2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 | 10
116 files changed, 3318 insertions, 1129 deletions
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index c7eea46f2..207dee068 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -3,13 +3,13 @@ kubeconfig: "{{openshift.common.config_base}}/node/{{ 'system:node:' + openshif
cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
-cni_url: "https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz"
+cni_url: "https://github.com/containernetworking/cni/releases/download/v0.5.2/cni-amd64-v0.5.2.tgz"
-calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico"
-calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam"
+calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico"
+calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico-ipam"
calico_ipv4pool_ipip: "always"
calico_ipv4pool_cidr: "192.168.0.0/16"
calico_log_dir: "/var/log/calico"
-calico_node_image: "calico/node:v1.1.0"
+calico_node_image: "calico/node:v1.2.1"
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index 5b324bce5..b2df0105f 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -4,3 +4,4 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf
calicoctl_bin_dir: "/usr/local/bin/"
calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl"
+calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.5.4"
diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2
index 1b87758ce..811884473 100644
--- a/roles/calico_master/templates/calico-policy-controller.yml.j2
+++ b/roles/calico_master/templates/calico-policy-controller.yml.j2
@@ -74,7 +74,7 @@ spec:
serviceAccountName: calico
containers:
- name: calico-policy-controller
- image: quay.io/calico/kube-policy-controller:v0.5.4
+ image: {{ calico_url_policy_controller }}
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
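
The template change above only swaps the hard-coded image for the new calico_url_policy_controller default, so the policy-controller image can now be overridden per deployment. A minimal sketch of such an override, assuming an inventory/group_vars placement (the file location and tag shown are hypothetical, not taken from this diff):

# group_vars/OSEv3.yml (illustrative location)
calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.6.0"  # hypothetical tag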
diff --git a/roles/docker/README.md b/roles/docker/README.md
index 4a9f21f22..19908c036 100644
--- a/roles/docker/README.md
+++ b/roles/docker/README.md
@@ -3,7 +3,7 @@ Docker
Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
Requirements
------------
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index f0f5a40dd..650f06f86 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -130,8 +130,8 @@
dest: "{{ container_engine_systemd_dir }}/custom.conf"
src: systemcontainercustom.conf.j2
-# Set local versions of facts that must be in json format for daemon.json
-# NOTE: When jinja2.9+ is used the daemon.json file can move to using tojson
+# Set local versions of facts that must be in json format for container-daemon.json
+# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
- set_fact:
l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
@@ -139,10 +139,12 @@
l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
-# Configure container-engine using the daemon.json file
+# Configure container-engine using the container-daemon.json file
+# NOTE: daemon.json and container-daemon.json have been separated to avoid
+# collision.
- name: Configure Container Engine
template:
- dest: "{{ docker_conf_dir }}/daemon.json"
+ dest: "{{ docker_conf_dir }}/container-daemon.json"
src: daemon.json
# Enable and start the container-engine service
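
For reference, the facts set above are what get rendered into the new container-daemon.json; they come from ordinary inventory variables. A minimal sketch of those variables, with purely illustrative values:

# Illustrative host/group variables consumed by the set_fact tasks above
docker_insecure_registries:
  - "registry.example.com:5000"   # hypothetical registry
docker_log_options:
  max-size: "50m"
docker_selinux_enabled: true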
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index fa2f44609..586aebb11 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -122,7 +122,8 @@
- include_role:
name: etcd_common
- tasks_from: etcdctl.yml
+ vars:
+ r_etcd_common_action: drop_etcdctl
when: openshift_etcd_etcdctl_profile | default(true) | bool
- name: Set fact etcd_service_status_changed
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 72ffadbd2..f1d948d16 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -15,6 +15,56 @@
{%- endif -%}
{% endfor -%}
+- name: Check etcd system container package
+ command: >
+ atomic containers list --no-trunc -a -f container=etcd -f backend=ostree
+ register: etcd_result
+
+- name: Unmask etcd service
+ systemd:
+ name: etcd
+ state: stopped
+ enabled: yes
+ masked: no
+ daemon_reload: yes
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ when: "'etcd' in etcd_result.stdout"
+
+- name: Disable etcd_container
+ systemd:
+ name: etcd_container
+ state: stopped
+ enabled: no
+ masked: yes
+ daemon_reload: yes
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+
+- name: Check for previous etcd data store
+ stat:
+ path: "{{ etcd_data_dir }}/member/"
+ register: src_datastore
+
+- name: Check for etcd system container data store
+ stat:
+ path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member"
+ register: dest_datastore
+
+- name: Ensure that etcd system container data dirs exist
+ file: path="{{ item }}" state=directory
+ with_items:
+ - "{{ r_etcd_common_system_container_host_dir }}/etc"
+ - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd"
+
+- name: Copy etcd data store
+ command: >
+ cp -a {{ etcd_data_dir }}/member
+ {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member
+ when:
+ - src_datastore.stat.exists
+ - not dest_datastore.stat.exists
+
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
@@ -35,3 +85,5 @@
- ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
+ - ETCD_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index e1a080b34..8cc7a9c20 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,9 +1,21 @@
---
+# Default action when calling this role
+r_etcd_common_action: noop
+r_etcd_common_backup_tag: ''
+r_etcd_common_backup_sufix_name: ''
+
# runc, docker, host
r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_embedded_etcd: false
+
+# etcd run on a host => use etcdctl command directly
+# etcd run as a docker container => use docker exec
+# etcd run as a runc container => use runc exec
+r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
# etcd server vars
-etcd_conf_dir: "{{ '/etc/etcd' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/etc' }}"
+etcd_conf_dir: '/etc/etcd'
+r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
etcd_system_container_conf_dir: /var/lib/etcd/etc
etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
@@ -40,7 +52,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: /var/lib/etcd/
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
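
The new r_etcd_common_etcdctl_command default picks the appropriate wrapper for whichever runtime is in use. A sketch of how it resolves, assuming the defaults above (the debug task is only illustrative):

# host runtime or embedded etcd -> "etcdctl"
# docker runtime                -> "docker exec etcd_container etcdctl"
# runc runtime                  -> "runc exec etcd etcdctl"
- debug:
    msg: "{{ r_etcd_common_etcdctl_command }}"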
diff --git a/roles/etcd_upgrade/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 1ea6fc59f..4a4832275 100644
--- a/roles/etcd_upgrade/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -1,15 +1,11 @@
---
-# INPUT r_etcd_backup_sufix_name
-# INPUT r_etcd_backup_tag
-# OUTPUT r_etcd_upgrade_backup_complete
- set_fact:
- # ORIGIN etcd_data_dir etcd_common.defaults
- l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_backup_tag | default('') }}{{ r_etcd_backup_sufix_name }}"
+ l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
- register: avail_disk
+ register: l_avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -17,8 +13,8 @@
# TODO: replace shell module with command and update later checks
- name: Check current etcd disk usage
shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: r_etcd_upgrade_embedded_etcd | bool
+ register: l_etcd_disk_usage
+ when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -26,9 +22,9 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (r_etcd_upgrade_embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+ {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_avail_disk.stdout }} Kb available.
+ when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
@@ -37,17 +33,22 @@
#
# For embedded non containerized we need to ensure we have the latest version
# etcd on the host.
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: l_ostree_booted
+
- name: Install latest etcd for embedded
package:
name: etcd
state: latest
when:
- - r_etcd_upgrade_embedded_etcd | bool
+ - r_etcd_common_embedded_etcd | bool
- not l_ostree_booted.stat.exists | bool
- name: Generate etcd backup
command: >
- {{ etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
+ {{ r_etcd_common_etcdctl_command }} backup --data-dir={{ etcd_data_dir }}
--backup-dir={{ l_etcd_backup_dir }}
# According to the docs change you can simply copy snap/db
@@ -55,16 +56,16 @@
- name: Check for v3 data store
stat:
path: "{{ etcd_data_dir }}/member/snap/db"
- register: v3_db
+ register: l_v3_db
- name: Copy etcd v3 data store
command: >
cp -a {{ etcd_data_dir }}/member/snap/db
{{ l_etcd_backup_dir }}/member/snap/
- when: v3_db.stat.exists
+ when: l_v3_db.stat.exists
- set_fact:
- r_etcd_upgrade_backup_complete: True
+ r_etcd_common_backup_complete: True
- name: Display location of etcd backup
debug:
diff --git a/roles/etcd_common/tasks/etcdctl.yml b/roles/etcd_common/tasks/drop_etcdctl.yml
index 6cb456677..6cb456677 100644
--- a/roles/etcd_common/tasks/etcdctl.yml
+++ b/roles/etcd_common/tasks/drop_etcdctl.yml
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
new file mode 100644
index 000000000..6ed87e6c7
--- /dev/null
+++ b/roles/etcd_common/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: Fail if invalid r_etcd_common_action provided
+ fail:
+ msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'"
+ when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl']
+
+- name: Include main action task file
+ include: "{{ r_etcd_common_action }}.yml"
+ when: r_etcd_common_action != "noop"
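
With this dispatcher in place, callers select behaviour through r_etcd_common_action, as the etcd role diff above already does for drop_etcdctl. A minimal sketch of requesting a backup (the tag and suffix values are illustrative):

- include_role:
    name: etcd_common
  vars:
    r_etcd_common_action: backup
    r_etcd_common_backup_tag: pre-upgrade-                          # illustrative tag
    r_etcd_common_backup_sufix_name: "{{ ansible_date_time.epoch }}"
# on success the backup task file sets r_etcd_common_backup_complete: True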
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index 3ac7f3401..4795188a6 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -5,11 +5,14 @@
- name: Check status of etcd certificates
stat:
- path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ path: "{{ item }}"
with_items:
- - "{{ etcd_cert_prefix }}server.crt"
- - "{{ etcd_cert_prefix }}peer.crt"
- - "{{ etcd_cert_prefix }}ca.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
+ - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
+ - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
register: g_etcd_server_cert_stat_result
when: not etcd_certificates_redeploy | default(false) | bool
@@ -132,8 +135,11 @@
- name: Ensure certificate directory exists
file:
- path: "{{ etcd_cert_config_dir }}"
+ path: "{{ item }}"
state: directory
+ with_items:
+ - "{{ etcd_cert_config_dir }}"
+ - "{{ etcd_system_container_cert_config_dir }}"
when: etcd_server_certs_missing | bool
- name: Unarchive cert tarball
@@ -164,15 +170,28 @@
- name: Ensure ca directory exists
file:
- path: "{{ etcd_ca_dir }}"
+ path: "{{ item }}"
state: directory
+ with_items:
+ - "{{ etcd_ca_dir }}"
+ - "{{ etcd_system_container_cert_config_dir }}/ca"
when: etcd_server_certs_missing | bool
-- name: Unarchive etcd ca cert tarballs
+- name: Unarchive cert tarball for the system container
+ unarchive:
+ src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_system_container_cert_config_dir }}"
+ when:
+ - etcd_server_certs_missing | bool
+ - r_etcd_common_etcd_runtime == 'runc'
+
+- name: Unarchive etcd ca cert tarballs for the system container
unarchive:
src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- when: etcd_server_certs_missing | bool
+ dest: "{{ etcd_system_container_cert_config_dir }}/ca"
+ when:
+ - etcd_server_certs_missing | bool
+ - r_etcd_common_etcd_runtime == 'runc'
- name: Delete temporary directory
local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
index 01ad8a268..61bbba225 100644
--- a/roles/etcd_upgrade/defaults/main.yml
+++ b/roles/etcd_upgrade/defaults/main.yml
@@ -1,9 +1,3 @@
---
r_etcd_upgrade_action: upgrade
r_etcd_upgrade_mechanism: rpm
-r_etcd_upgrade_embedded_etcd: False
-
-# etcd run on a host => use etcdctl command directly
-# etcd run as a docker container => use docker exec
-# etcd run as a runc container => use runc exec
-etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_upgrade_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}"
diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml
index 018bdc8d7..afdb0267f 100644
--- a/roles/etcd_upgrade/meta/main.yml
+++ b/roles/etcd_upgrade/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: etcd_common
+ r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}"
diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
index 5178c14e3..129c69d6b 100644
--- a/roles/etcd_upgrade/tasks/main.yml
+++ b/roles/etcd_upgrade/tasks/main.yml
@@ -2,9 +2,9 @@
# INPUT r_etcd_upgrade_action
- name: Fail if invalid etcd_upgrade_action provided
fail:
- msg: "etcd_upgrade role can only be called with 'upgrade' or 'backup'"
+ msg: "etcd_upgrade role can only be called with 'upgrade'"
when:
- - r_etcd_upgrade_action not in ['upgrade', 'backup']
+ - r_etcd_upgrade_action not in ['upgrade']
- name: Detecting Atomic Host Operating System
stat:
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 3974cc4dd..1b73bfd0e 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1097,10 +1097,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1110,34 +1106,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 320eac17e..b09321a5b 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1083,10 +1083,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1096,34 +1092,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index f9658d6e1..221ef5094 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1069,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1082,34 +1078,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 0bdfd0bad..071562875 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1069,10 +1069,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1082,34 +1078,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index df0e40d20..bf2650460 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1187,10 +1187,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1200,34 +1196,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 8af8cb196..a2b7d12c0 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1212,10 +1212,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1225,34 +1221,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 3ed0d65dc..289f08b83 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1061,10 +1061,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1074,34 +1070,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 5c8ed48d2..7cd29215f 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1067,10 +1067,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1080,34 +1076,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index f3b6d552d..5b11f45ba 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1111,10 +1111,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1124,34 +1120,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index c6421128a..d3834ce0c 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1078,10 +1078,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1091,34 +1087,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index a791c29af..0d751fe28 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1051,10 +1051,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1064,34 +1060,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index bbc123ce0..3a6ba3e56 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1070,10 +1070,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1083,34 +1079,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index cd1afd0d2..5db036b23 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1087,10 +1087,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1100,34 +1096,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 215723cc8..56af303cc 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1090,10 +1090,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1103,34 +1099,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
@@ -1473,7 +1461,12 @@ class OCObject(OpenShiftCLI):
def delete(self):
'''delete the object'''
- return self._delete(self.kind, name=self.name, selector=self.selector)
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -1557,7 +1550,8 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
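
Together, the delete() and state-absent changes above make oc_obj tolerate objects that are already gone instead of failing. A sketch of a task that now succeeds even when the object does not exist (the object name is hypothetical):

- oc_obj:
    state: absent
    kind: configmap
    name: example-config        # hypothetical object
    namespace: default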
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 358ef5130..130521761 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1022,10 +1022,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1035,34 +1031,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 025b846c6..c6568d520 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1079,10 +1079,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1092,34 +1088,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 05dfddab8..a78bc06d2 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1076,10 +1076,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1089,34 +1085,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index d7de4964c..a88639bfc 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1071,10 +1071,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1084,34 +1080,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 3090b4cad..0c0bc9386 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1121,10 +1121,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1134,34 +1130,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 6a505fb6b..f112b6dd0 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1065,10 +1065,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1078,34 +1074,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 02257500f..d762e0c38 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1117,10 +1117,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1130,34 +1126,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
@@ -1613,7 +1601,7 @@ class OCSecret(OpenShiftCLI):
'''delete a secret by name'''
return self._delete('secrets', self.name)
- def create(self, files=None, contents=None):
+ def create(self, files=None, contents=None, force=False):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
@@ -1622,6 +1610,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['secrets', 'new', self.name]
if self.type is not None:
cmd.append("--type=%s" % (self.type))
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -1634,7 +1624,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files)
+ secret = self.prep_secret(files, force)
if secret['returncode'] != 0:
return secret
@@ -1646,7 +1636,7 @@ class OCSecret(OpenShiftCLI):
return self._replace(sfile_path, force=force)
- def prep_secret(self, files=None, contents=None):
+ def prep_secret(self, files=None, contents=None, force=False):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
@@ -1657,6 +1647,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['-ojson', 'secrets', 'new', self.name]
if self.type is not None:
cmd.extend(["--type=%s" % (self.type)])
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -1719,7 +1711,7 @@ class OCSecret(OpenShiftCLI):
return {'changed': True,
'msg': 'Would have performed a create.'}
- api_rval = ocsecret.create(files, params['contents'])
+ api_rval = ocsecret.create(files, params['contents'], force=params['force'])
# Remove files
if files and params['delete_after']:
@@ -1736,7 +1728,7 @@ class OCSecret(OpenShiftCLI):
########
# Update
########
- secret = ocsecret.prep_secret(params['files'], params['contents'])
+ secret = ocsecret.prep_secret(params['files'], params['contents'], force=params['force'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
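
The oc_secret change above threads a new force option from the module parameters into both create() and prep_secret(); when set, the generated `oc secrets new` invocation gains `--confirm`. A small sketch of the command construction; the helper name and the sample values are invented.

    def build_secret_cmd(name, secrets, secret_type=None, force=False, as_json=False):
        # Mirrors OCSecret.create()/prep_secret() above; illustrative only.
        cmd = ['-ojson'] if as_json else []
        cmd += ['secrets', 'new', name]
        if secret_type is not None:
            cmd.append("--type=%s" % secret_type)
        if force:
            cmd.append('--confirm')  # the diff maps force=True onto oc's --confirm flag
        cmd.extend(secrets)
        return cmd

    print(build_secret_cmd('registry-certificates',
                           ['registry.crt=/etc/origin/master/registry.crt'], force=True))
    # ['secrets', 'new', 'registry-certificates', '--confirm', 'registry.crt=/etc/origin/master/registry.crt']

The same change appears again later in this diff in roles/lib_openshift/src/class/oc_secret.py, which is the source the library file is generated from.
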
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 308f45488..769b75e15 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1124,10 +1124,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1137,34 +1133,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 68c1fc51c..446987eff 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1063,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1076,34 +1072,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index ebc5bf8a2..c7eb1986a 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1063,10 +1063,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1076,34 +1072,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index d1a20fddc..3a98693b7 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1123,10 +1123,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1136,34 +1132,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 548c9d8e0..939261526 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1035,10 +1035,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1048,34 +1044,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 3826cd8e5..41e7d0ab8 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1112,10 +1112,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -1125,34 +1121,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 6f0da3d5c..5e423bea9 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -33,7 +33,12 @@ class OCObject(OpenShiftCLI):
def delete(self):
'''delete the object'''
- return self._delete(self.kind, name=self.name, selector=self.selector)
+ results = self._delete(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
+ results['returncode'] = 0
+
+ return results
def create(self, files=None, content=None):
'''
@@ -117,7 +122,8 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
+ (len(api_rval['results']) == 0 or \
+ ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
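
With the delete() change above, removing an object that no longer exists is reported as success, which keeps `state: absent` idempotent when the object is already gone. A minimal sketch of the added post-processing; the result dict follows the openshift_cmd() shape but its values are invented.

    def normalize_delete(results, name):
        # Mirrors the new OCObject.delete() handling above; illustrative only.
        if (results['returncode'] != 0 and 'stderr' in results and
                '"{}" not found'.format(name) in results['stderr']):
            results['returncode'] = 0  # an already-absent object counts as deleted
        return results

    print(normalize_delete(
        {'returncode': 1, 'stderr': 'Error from server: routes "frontend" not found'},
        'frontend'))
    # {'returncode': 0, 'stderr': 'Error from server: routes "frontend" not found'}
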
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index ee83580df..4ee6443e9 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -44,7 +44,7 @@ class OCSecret(OpenShiftCLI):
'''delete a secret by name'''
return self._delete('secrets', self.name)
- def create(self, files=None, contents=None):
+ def create(self, files=None, contents=None, force=False):
'''Create a secret '''
if not files:
files = Utils.create_tmp_files_from_contents(contents)
@@ -53,6 +53,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['secrets', 'new', self.name]
if self.type is not None:
cmd.append("--type=%s" % (self.type))
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -65,7 +67,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files)
+ secret = self.prep_secret(files, force)
if secret['returncode'] != 0:
return secret
@@ -77,7 +79,7 @@ class OCSecret(OpenShiftCLI):
return self._replace(sfile_path, force=force)
- def prep_secret(self, files=None, contents=None):
+ def prep_secret(self, files=None, contents=None, force=False):
''' return what the secret would look like if created
This is accomplished by passing -ojson. This will most likely change in the future
'''
@@ -88,6 +90,8 @@ class OCSecret(OpenShiftCLI):
cmd = ['-ojson', 'secrets', 'new', self.name]
if self.type is not None:
cmd.extend(["--type=%s" % (self.type)])
+ if force:
+ cmd.append('--confirm')
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -150,7 +154,7 @@ class OCSecret(OpenShiftCLI):
return {'changed': True,
'msg': 'Would have performed a create.'}
- api_rval = ocsecret.create(files, params['contents'])
+ api_rval = ocsecret.create(files, params['contents'], force=params['force'])
# Remove files
if files and params['delete_after']:
@@ -167,7 +171,7 @@ class OCSecret(OpenShiftCLI):
########
# Update
########
- secret = ocsecret.prep_secret(params['files'], params['contents'])
+ secret = ocsecret.prep_secret(params['files'], params['contents'], force=params['force'])
if secret['returncode'] != 0:
return {'failed': True, 'msg': secret}
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index b3f01008b..16770b22d 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -273,10 +273,6 @@ class OpenShiftCLI(object):
elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
cmds.extend(['-n', self.namespace])
- rval = {}
- results = ''
- err = None
-
if self.verbose:
print(' '.join(cmds))
@@ -286,34 +282,26 @@ class OpenShiftCLI(object):
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
- "results": results,
"cmd": ' '.join(cmds)}
- if returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as verr:
- if "No JSON object could be decoded" in verr.args:
- err = verr.args
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print("STDOUT: {0}".format(stdout))
- print("STDERR: {0}".format(stderr))
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds})
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
- else:
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {}})
+ "stdout": stdout})
return rval
diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py
index 09cc4a374..323b3423c 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py
@@ -48,6 +48,7 @@ class OCSecretTest(unittest.TestCase):
'debug': False,
'files': None,
'delete_after': True,
+ 'force': False,
}
# Return values of our mocked function call. These get returned once per call.
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index c7b906949..b9a7ec32f 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -108,6 +108,38 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
+- name: Test local loopback context
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
+ --config={{ openshift_master_loopback_config }}
+ changed_when: false
+ register: loopback_config
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Generate the loopback master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --certificate-authority={{ openshift_ca_cert }}
+ --client-dir={{ openshift_ca_config_dir }}
+ --groups=system:masters,system:openshift-master
+ --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ {% endif %}
+ when: loopback_context_string not in loopback_config.stdout
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
- name: Restore original serviceaccount keys
copy:
src: "{{ item }}.keep"
diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml
index a32e385ec..d04c1766d 100644
--- a/roles/openshift_ca/vars/main.yml
+++ b/roles/openshift_ca/vars/main.yml
@@ -4,3 +4,6 @@ openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
openshift_version: "{{ openshift_pkg_version | default('') }}"
+
+openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
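
These two new variables back the "Test local loopback context" task added earlier in this diff: the loopback kubeconfig is only regenerated when the expected current-context line is missing from the `oc config view` output. A small Python sketch of that guard; the context name and config output below are invented samples.

    loopback_context_name = "default/master-example-com:8443/system:openshift-master"  # assumed value
    loopback_context_string = "current-context: " + loopback_context_name

    config_view_stdout = (
        "apiVersion: v1\n"
        "clusters: []\n"
        "contexts: []\n"
        "current-context: default/master-example-com:8443/system:openshift-master\n"
        "kind: Config\n"
    )

    # Mirrors the task condition: when loopback_context_string not in loopback_config.stdout
    print(loopback_context_string not in config_view_stdout)  # False -> regeneration task is skipped
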
diff --git a/roles/openshift_default_storage_class/README.md b/roles/openshift_default_storage_class/README.md
new file mode 100644
index 000000000..198163127
--- /dev/null
+++ b/roles/openshift_default_storage_class/README.md
@@ -0,0 +1,39 @@
+openshift_default_storage_class
+=========
+
+A role that deploys configurations for an OpenShift StorageClass
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+openshift_storageclass_name: Name of the storage class to create
+openshift_storageclass_provisioner: The Kubernetes provisioner to use
+openshift_storageclass_type: Type of storage to use; this differs among cloud providers
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+- role: openshift_default_storage_class
+ openshift_storageclass_name: awsEBS
+ openshift_storageclass_provisioner: kubernetes.io/aws-ebs
+ openshift_storageclass_type: gp2
+
+
+License
+-------
+
+Apache
+
+Author Information
+------------------
+
+OpenShift Operations
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
new file mode 100644
index 000000000..66ffd2a73
--- /dev/null
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+openshift_storageclass_defaults:
+ aws:
+ name: gp2
+ provisioner: kubernetes.io/aws-ebs
+ type: gp2
+ gce:
+ name: standard
+ provisioner: kubernetes.io/gce-pd
+ type: pd-standard
+
+openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
+openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
+openshift_storageclass_type: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['type'] }}"
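
These defaults resolve by openshift_cloudprovider_kind; a provider other than aws or gce has no entry, so the templated lookups would fail unless the three openshift_storageclass_* variables are set explicitly. A Python mirror of the resolution, illustrative only.

    openshift_storageclass_defaults = {
        'aws': {'name': 'gp2', 'provisioner': 'kubernetes.io/aws-ebs', 'type': 'gp2'},
        'gce': {'name': 'standard', 'provisioner': 'kubernetes.io/gce-pd', 'type': 'pd-standard'},
    }

    def resolve_storageclass(kind):
        defaults = openshift_storageclass_defaults[kind]  # raises KeyError for other providers
        return defaults['name'], defaults['provisioner'], defaults['type']

    print(resolve_storageclass('aws'))  # ('gp2', 'kubernetes.io/aws-ebs', 'gp2')
    print(resolve_storageclass('gce'))  # ('standard', 'kubernetes.io/gce-pd', 'pd-standard')
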
diff --git a/roles/openshift_default_storage_class/meta/main.yml b/roles/openshift_default_storage_class/meta/main.yml
new file mode 100644
index 000000000..d7d57fe39
--- /dev/null
+++ b/roles/openshift_default_storage_class/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+  author: OpenShift Operations
+  description: This role configures the StorageClass in OpenShift
+ company: Red Hat
+ license: Apache
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+    versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml
new file mode 100644
index 000000000..408fc17c7
--- /dev/null
+++ b/roles/openshift_default_storage_class/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+# Install default storage classes in GCE & AWS
+- name: Ensure storageclass object
+ oc_obj:
+ kind: storageclass
+ name: "{{ openshift_storageclass_name }}"
+ content:
+ path: /tmp/openshift_storageclass
+ data:
+ kind: StorageClass
+ apiVersion: storage.k8s.io/v1beta1
+ metadata:
+ name: "{{ openshift_storageclass_name }}"
+ annotations:
+ storageclass.beta.kubernetes.io/is-default-class: "true"
+ provisioner: "{{ openshift_storageclass_provisioner }}"
+ parameters:
+ type: "{{ openshift_storageclass_type }}"
+ run_once: true
diff --git a/roles/openshift_default_storage_class/vars/main.yml b/roles/openshift_default_storage_class/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_default_storage_class/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index 82db36eba..b3ecd57a6 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -5,6 +5,7 @@ etcd_hostname: "{{ openshift.common.hostname }}"
etcd_ip: "{{ openshift.common.ip }}"
etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
etcd_cert_prefix:
-etcd_cert_config_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container | bool else '/var/lib/etcd/etcd.etcd/etc' }}"
+etcd_cert_config_dir: "/etc/etcd"
+etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc
etcd_peer_url_scheme: https
etcd_url_scheme: https
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
index a81dbb654..2583018b7 100644
--- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
@@ -103,7 +103,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +137,22 @@
"kind": "DockerImage",
"name": "centos/nodejs-4-centos7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/6/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/nodejs-6-centos7:latest"
+ }
}
]
}
@@ -407,7 +423,7 @@
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"jee,java",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "ImageStreamTag",
@@ -423,7 +439,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:8.1,jee,java",
"version": "8.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -439,7 +455,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:9.0,jee,java",
"version": "9.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -455,7 +471,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.0,jee,java",
"version": "10.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -471,7 +487,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.1,jee,java",
"version": "10.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
index 2ed0efe1e..b65f0a5e3 100644
--- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
@@ -103,7 +103,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +137,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/nodejs-4-rhel7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/nodejs-6-rhel7:latest"
+ }
}
]
}
@@ -253,7 +269,7 @@
"tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
- "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
"kind": "DockerImage",
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 514c06500..cfe092a28 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -193,8 +193,7 @@ def hostname_valid(hostname):
"""
if (not hostname or
hostname.startswith('localhost') or
- hostname.endswith('localdomain') or
- hostname.endswith('novalocal')):
+ hostname.endswith('localdomain')):
return False
return True
@@ -1041,10 +1040,13 @@ def set_sdn_facts_if_unset(facts, system_facts):
def set_nodename(facts):
""" set nodename """
if 'node' in facts and 'common' in facts:
- if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
- facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
- elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
+ if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
+
+    # TODO: The openstack cloudprovider nodename setting was too opinionated.
+ # It needs to be generalized before it can be enabled again.
+ # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
+ # facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
else:
facts['node']['nodename'] = facts['common']['hostname'].lower()
return facts
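
With the OpenStack branch commented out (and *.novalocal hostnames no longer rejected by hostname_valid), nodename now comes from either the GCE metadata short hostname or, in every other case, the lowercased common hostname. A sketch of the resulting selection with invented facts.

    def pick_nodename(facts):
        # Mirrors set_nodename() after this change; illustrative only.
        if 'cloudprovider' in facts and facts['cloudprovider'].get('kind') == 'gce':
            return facts['provider']['metadata']['instance']['hostname'].split('.')[0]
        return facts['common']['hostname'].lower()

    gce_facts = {
        'cloudprovider': {'kind': 'gce'},
        'provider': {'metadata': {'instance': {'hostname': 'node-1.c.my-project.internal'}}},
        'common': {'hostname': 'node-1'},
    }
    print(pick_nodename(gce_facts))                                        # node-1
    print(pick_nodename({'common': {'hostname': 'Master-1.example.com'}}))  # master-1.example.com
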
diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py
new file mode 100644
index 000000000..2e60735d6
--- /dev/null
+++ b/roles/openshift_health_checker/library/ocutil.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+"""Interface to OpenShift oc command"""
+
+import os
+import shlex
+import shutil
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ """Find and return oc binary file"""
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+    # Use shutil.which if it is available, otherwise fall back to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+def main():
+ """Module that executes commands on a remote OpenShift cluster"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ namespace=dict(type="str", required=True),
+ config_file=dict(type="str", required=True),
+ cmd=dict(type="str", required=True),
+ extra_args=dict(type="list", default=[]),
+ ),
+ )
+
+ cmd = [
+ locate_oc_binary(),
+ '--config', module.params["config_file"],
+ '-n', module.params["namespace"],
+ ] + shlex.split(module.params["cmd"])
+
+ failed = True
+ try:
+ cmd_result = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT)
+ failed = False
+ except subprocess.CalledProcessError as exc:
+ cmd_result = '[rc {}] {}\n{}'.format(exc.returncode, ' '.join(exc.cmd), exc.output)
+ except OSError as exc:
+ # we get this when 'oc' is not there
+ cmd_result = str(exc)
+
+ module.exit_json(
+ changed=False,
+ failed=failed,
+ result=cmd_result,
+ )
+
+
+if __name__ == '__main__':
+ main()
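
The try/except AttributeError in locate_oc_binary() exists because shutil.which() is Python 3 only; under Python 2 the module walks the candidate paths itself. A small standalone sketch of the same fallback.

    import os
    import shutil

    def which_or_walk(binary, paths):
        # shutil.which() exists on Python 3; on Python 2 the attribute lookup
        # raises AttributeError and we fall back to a manual path walk.
        try:
            found = shutil.which(binary, path=os.pathsep.join(paths))
            if found is not None:
                return found
        except AttributeError:
            for path in paths:
                candidate = os.path.join(path, binary)
                if os.path.exists(candidate):
                    return candidate
        return binary

    lookup_paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ['/usr/local/bin']
    print(which_or_walk('oc', lookup_paths))  # absolute path if found, otherwise just 'oc'
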
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index be63d864a..5c9949ced 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -66,16 +66,26 @@ class OpenShiftCheck(object):
LOADER_EXCLUDES = (
"__init__.py",
"mixins.py",
+ "logging.py",
)
-def load_checks():
+def load_checks(path=None, subpkg=""):
"""Dynamically import all check modules for the side effect of registering checks."""
- return [
- import_module(__package__ + "." + name[:-3])
- for name in os.listdir(os.path.dirname(__file__))
- if name.endswith(".py") and name not in LOADER_EXCLUDES
- ]
+ if path is None:
+ path = os.path.dirname(__file__)
+
+ modules = []
+
+ for name in os.listdir(path):
+ if os.path.isdir(os.path.join(path, name)):
+ modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
+ continue
+
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
+ modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
+
+ return modules
def get_var(task_vars, *keys, **kwargs):
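
load_checks() now recurses into subdirectories so that checks in subpackages, such as the new logging/ package added later in this diff, are imported and registered as well; "logging.py" joins LOADER_EXCLUDES because logging/logging.py holds the shared LoggingCheck base class rather than a runnable check. A sketch of how the dotted module names are composed; the directory tree in the comment is an assumed example.

    import os

    LOADER_EXCLUDES = ("__init__.py", "mixins.py", "logging.py")

    def discover_module_names(path, subpkg="", package="openshift_checks"):
        # Same traversal as load_checks() above, but returning dotted names
        # instead of importing them; illustrative only.
        names = []
        for entry in sorted(os.listdir(path)):
            full = os.path.join(path, entry)
            if os.path.isdir(full):
                names += discover_module_names(full, subpkg + "." + entry, package)
                continue
            if entry.endswith(".py") and entry not in LOADER_EXCLUDES:
                names.append(package + subpkg + "." + entry[:-3])
        return names

    # For a tree like openshift_checks/{docker_storage.py, logging/{logging.py, curator.py}}
    # this yields: openshift_checks.docker_storage and openshift_checks.logging.curator
    # (logging/logging.py is skipped by LOADER_EXCLUDES).
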
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 4588ed634..27e6fe383 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,8 +1,9 @@
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks.mixins import DockerHostMixin
-class DockerImageAvailability(OpenShiftCheck):
+class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Check that required Docker images are available.
This check attempts to ensure that required docker images are
@@ -36,19 +37,11 @@ class DockerImageAvailability(OpenShiftCheck):
def run(self, tmp, task_vars):
msg, failed, changed = self.ensure_dependencies(task_vars)
-
- # exit early if Skopeo update fails
if failed:
- if "No package matching" in msg:
- msg = "Ensure that all required dependencies can be installed via `yum`.\n"
return {
"failed": True,
"changed": changed,
- "msg": (
- "Unable to update or install required dependency packages on this host;\n"
- "These are required in order to check Docker image availability:"
- "\n {deps}\n{msg}"
- ).format(deps=',\n '.join(self.dependencies), msg=msg),
+ "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
}
required_images = self.required_images(task_vars)
@@ -168,12 +161,3 @@ class DockerImageAvailability(OpenShiftCheck):
args = {"_raw_params": cmd_str}
result = self.module_executor("command", args, task_vars)
return not result.get("failed", False) and result.get("rc", 0) == 0
-
- # ensures that the skopeo and python-docker-py packages exist
- # check is skipped on atomic installations
- def ensure_dependencies(self, task_vars):
- if get_var(task_vars, "openshift", "common", "is_atomic"):
- return "", False, False
-
- result = self.module_executor("yum", {"name": self.dependencies, "state": "latest"}, task_vars)
- return result.get("msg", ""), result.get("failed", False) or result.get("rc", 0) != 0, result.get("changed")
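
The ensure_dependencies() helper removed here moves into DockerHostMixin in openshift_checks/mixins.py, which is not among the hunks shown in this section. Purely as a hedged guess reconstructed from the removed lines, the mixin presumably looks roughly like the sketch below; the class and stub names are invented, and in the real framework get_var and the module-execution callback come from openshift_checks itself.

    # Hedged sketch only: the real implementation lives in mixins.py, not shown here.
    def get_var(task_vars, *keys):
        # simplified stand-in for openshift_checks.get_var
        value = task_vars
        for key in keys:
            value = value[key]
        return value

    class DockerHostMixinSketch(object):
        dependencies = []  # each check lists the packages it needs

        def ensure_dependencies(self, task_vars):
            """Return (msg, failed, changed); skipped entirely on Atomic hosts."""
            if get_var(task_vars, "openshift", "common", "is_atomic"):
                return "", False, False
            result = self.execute_module(
                "yum", {"name": self.dependencies, "state": "latest"}, task_vars)
            msg = result.get("msg", "")
            failed = result.get("failed", False) or result.get("rc", 0) != 0
            return msg, failed, result.get("changed", False)
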
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
new file mode 100644
index 000000000..7f1751b36
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -0,0 +1,185 @@
+"""Check Docker storage driver and usage."""
+import json
+import re
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import DockerHostMixin
+
+
+class DockerStorage(DockerHostMixin, OpenShiftCheck):
+ """Check Docker storage driver compatibility.
+
+ This check ensures that Docker is using a supported storage driver,
+ and that loopback is not being used (if using devicemapper).
+ Also that storage usage is not above threshold.
+ """
+
+ name = "docker_storage"
+ tags = ["pre-install", "health", "preflight"]
+
+ dependencies = ["python-docker-py"]
+ storage_drivers = ["devicemapper", "overlay2"]
+ max_thinpool_data_usage_percent = 90.0
+ max_thinpool_meta_usage_percent = 90.0
+
+ # pylint: disable=too-many-return-statements
+ # Reason: permanent stylistic exception;
+ # it is clearer to return on failures and there are just many ways to fail here.
+ def run(self, tmp, task_vars):
+ msg, failed, changed = self.ensure_dependencies(task_vars)
+ if failed:
+ return {
+ "failed": True,
+ "changed": changed,
+ "msg": "Some dependencies are required in order to query docker storage on host:\n" + msg
+ }
+
+ # attempt to get the docker info hash from the API
+ info = self.execute_module("docker_info", {}, task_vars)
+ if info.get("failed"):
+ return {"failed": True, "changed": changed,
+ "msg": "Failed to query Docker API. Is docker running on this host?"}
+ if not info.get("info"): # this would be very strange
+ return {"failed": True, "changed": changed,
+ "msg": "Docker API query missing info:\n{}".format(json.dumps(info))}
+ info = info["info"]
+
+ # check if the storage driver we saw is valid
+ driver = info.get("Driver", "[NONE]")
+ if driver not in self.storage_drivers:
+ msg = (
+ "Detected unsupported Docker storage driver '{driver}'.\n"
+ "Supported storage drivers are: {drivers}"
+ ).format(driver=driver, drivers=', '.join(self.storage_drivers))
+ return {"failed": True, "changed": changed, "msg": msg}
+
+ # driver status info is a list of tuples; convert to dict and validate based on driver
+ driver_status = {item[0]: item[1] for item in info.get("DriverStatus", [])}
+ if driver == "devicemapper":
+ if driver_status.get("Data loop file"):
+ msg = (
+ "Use of loopback devices with the Docker devicemapper storage driver\n"
+ "(the default storage configuration) is unsupported in production.\n"
+ "Please use docker-storage-setup to configure a backing storage volume.\n"
+ "See http://red.ht/2rNperO for further information."
+ )
+ return {"failed": True, "changed": changed, "msg": msg}
+ result = self._check_dm_usage(driver_status, task_vars)
+ result['changed'] = result.get('changed', False) or changed
+ return result
+
+ # TODO(lmeyer): determine how to check usage for overlay2
+
+ return {"changed": changed}
+
+ def _check_dm_usage(self, driver_status, task_vars):
+ """
+ Backing assumptions: We expect devicemapper to be backed by an auto-expanding thin pool
+ implemented as an LV in an LVM2 VG. This is how docker-storage-setup currently configures
+ devicemapper storage. The LV is "thin" because it does not use all available storage
+ from its VG, instead expanding as needed; so to determine available space, we gather
+ current usage as the Docker API reports for the driver as well as space available for
+ expansion in the pool's VG.
+ Usage within the LV is divided into pools allocated to data and metadata, either of which
+ could run out of space first; so we check both.
+ """
+ vals = dict(
+ vg_free=self._get_vg_free(driver_status.get("Pool Name"), task_vars),
+ data_used=driver_status.get("Data Space Used"),
+ data_total=driver_status.get("Data Space Total"),
+ metadata_used=driver_status.get("Metadata Space Used"),
+ metadata_total=driver_status.get("Metadata Space Total"),
+ )
+
+ # convert all human-readable strings to bytes
+ for key, value in vals.copy().items():
+ try:
+ vals[key + "_bytes"] = self._convert_to_bytes(value)
+ except ValueError as err: # unlikely to hit this from API info, but just to be safe
+ return {
+ "failed": True,
+ "values": vals,
+ "msg": "Could not interpret {} value '{}' as bytes: {}".format(key, value, str(err))
+ }
+
+ # determine the threshold percentages which usage should not exceed
+ for name, default in [("data", self.max_thinpool_data_usage_percent),
+ ("metadata", self.max_thinpool_meta_usage_percent)]:
+ percent = get_var(task_vars, "max_thinpool_" + name + "_usage_percent", default=default)
+ try:
+ vals[name + "_threshold"] = float(percent)
+ except ValueError:
+ return {
+ "failed": True,
+ "msg": "Specified thinpool {} usage limit '{}' is not a percentage".format(name, percent)
+ }
+
+ # test whether the thresholds are exceeded
+ messages = []
+ for name in ["data", "metadata"]:
+ vals[name + "_pct_used"] = 100 * vals[name + "_used_bytes"] / (
+ vals[name + "_total_bytes"] + vals["vg_free_bytes"])
+ if vals[name + "_pct_used"] > vals[name + "_threshold"]:
+ messages.append(
+ "Docker thinpool {name} usage percentage {pct:.1f} "
+ "is higher than threshold {thresh:.1f}.".format(
+ name=name,
+ pct=vals[name + "_pct_used"],
+ thresh=vals[name + "_threshold"],
+ ))
+ vals["failed"] = True
+
+ vals["msg"] = "\n".join(messages or ["Thinpool usage is within thresholds."])
+ return vals
+
+ def _get_vg_free(self, pool, task_vars):
+ # Determine which VG to examine according to the pool name, the only indicator currently
+ # available from the Docker API driver info. We assume a name that looks like
+ # "vg--name-docker--pool"; vg and lv names with inner hyphens doubled, joined by a hyphen.
+ match = re.match(r'((?:[^-]|--)+)-(?!-)', pool) # matches up to the first single hyphen
+ if not match: # unlikely, but... be clear if we assumed wrong
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{}'.\n"
+ "However this name does not have the expected format of 'vgname-lvname'\n"
+ "so the available storage in the VG cannot be determined.".format(pool)
+ )
+ vg_name = match.groups()[0].replace("--", "-")
+ vgs_cmd = "/sbin/vgs --noheadings -o vg_free --select vg_name=" + vg_name
+ # should return free space like " 12.00g" if the VG exists; empty if it does not
+
+ ret = self.execute_module("command", {"_raw_params": vgs_cmd}, task_vars)
+ if ret.get("failed") or ret.get("rc", 0) != 0:
+ raise OpenShiftCheckException(
+ "Is LVM installed? Failed to run /sbin/vgs "
+ "to determine docker storage usage:\n" + ret.get("msg", "")
+ )
+ size = ret.get("stdout", "").strip()
+ if not size:
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{pool}'.\n"
+ "which we expect to come from local VG '{vg}'.\n"
+ "However, /sbin/vgs did not find this VG. Is Docker for this host"
+ "running and using the storage on the host?".format(pool=pool, vg=vg_name)
+ )
+ return size
+
+ @staticmethod
+ def _convert_to_bytes(string):
+ units = dict(
+ b=1,
+ k=1024,
+ m=1024**2,
+ g=1024**3,
+ t=1024**4,
+ p=1024**5,
+ )
+ string = string or ""
+ match = re.match(r'(\d+(?:\.\d+)?)\s*(\w)?', string) # float followed by optional unit
+ if not match:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ number, unit = match.groups()
+ multiplier = 1 if not unit else units.get(unit.lower())
+ if not multiplier:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ return float(number) * multiplier
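
Two small parsers in this new check are easy to misread, so here is a worked example of each; the sample strings are invented. _convert_to_bytes() reads a leading float plus an optional single-letter unit, and _get_vg_free() recovers the VG name from Docker's pool name by undoing LVM's doubled hyphens.

    import re

    def convert_to_bytes(string):
        # Same logic as _convert_to_bytes() above.
        units = dict(b=1, k=1024, m=1024**2, g=1024**3, t=1024**4, p=1024**5)
        match = re.match(r'(\d+(?:\.\d+)?)\s*(\w)?', string or "")
        if not match:
            raise ValueError("Cannot convert to a byte size: " + string)
        number, unit = match.groups()
        multiplier = 1 if not unit else units.get(unit.lower())
        if not multiplier:
            raise ValueError("Cannot convert to a byte size: " + string)
        return float(number) * multiplier

    print(convert_to_bytes("12.00g"))   # 12884901888.0
    print(convert_to_bytes("90 kB"))    # 92160.0 (only the first unit letter is read)

    # Pool-name parsing, as in _get_vg_free(): "vg--name-docker--pool" -> VG "vg-name"
    pool = "vg--name-docker--pool"
    match = re.match(r'((?:[^-]|--)+)-(?!-)', pool)  # matches up to the first single hyphen
    print(match.groups()[0].replace("--", "-"))      # vg-name
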
diff --git a/roles/openshift_health_checker/openshift_checks/logging/__init__.py b/roles/openshift_health_checker/openshift_checks/logging/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/__init__.py
diff --git a/roles/openshift_health_checker/openshift_checks/logging/curator.py b/roles/openshift_health_checker/openshift_checks/logging/curator.py
new file mode 100644
index 000000000..c9fc59896
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/curator.py
@@ -0,0 +1,61 @@
+"""
+Module for performing checks on a Curator logging deployment
+"""
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Curator(LoggingCheck):
+ """Module that checks an integrated logging Curator deployment"""
+
+ name = "curator"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ curator_pods, error = super(Curator, self).get_pods_for_component(
+ self.module_executor,
+ self.logging_namespace,
+ "curator",
+ task_vars
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_curator(curator_pods)
+
+ if check_error:
+ msg = ("The following Curator deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Curator deployment.'}
+
+ def check_curator(self, pods):
+ """Check to see if curator is up and working. Returns: error string"""
+ if not pods:
+ return (
+ "There are no Curator pods for the logging stack,\n"
+ "so nothing will prune Elasticsearch indexes.\n"
+ "Is Curator correctly deployed?"
+ )
+
+ not_running = super(Curator, self).not_running_pods(pods)
+ if len(not_running) == len(pods):
+ return (
+ "The Curator pod is not currently in a running state,\n"
+ "so Elasticsearch indexes may increase without bound."
+ )
+ if len(pods) - len(not_running) > 1:
+ return (
+ "There is more than one Curator pod running. This should not normally happen.\n"
+ "Although this doesn't cause any problems, you may want to investigate."
+ )
+
+ return None
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
new file mode 100644
index 000000000..01cb35b81
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -0,0 +1,217 @@
+"""
+Module for performing checks on an Elasticsearch logging deployment
+"""
+
+import json
+import re
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Elasticsearch(LoggingCheck):
+ """Module that checks an integrated logging Elasticsearch deployment"""
+
+ name = "elasticsearch"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ es_pods, error = super(Elasticsearch, self).get_pods_for_component(
+ self.execute_module,
+ self.logging_namespace,
+ "es",
+ task_vars,
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_elasticsearch(es_pods, task_vars)
+
+ if check_error:
+ msg = ("The following Elasticsearch deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Elasticsearch deployment.'}
+
+ def _not_running_elasticsearch_pods(self, es_pods):
+ """Returns: list of running pods, list of errors about non-running pods"""
+ not_running = super(Elasticsearch, self).not_running_pods(es_pods)
+ if not_running:
+ return not_running, [(
+ 'The following Elasticsearch pods are not running:\n'
+ '{pods}'
+ 'These pods will not aggregate logs from their nodes.'
+ ).format(pods=''.join(
+ " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
+ for pod in not_running
+ ))]
+ return not_running, []
+
+ def check_elasticsearch(self, es_pods, task_vars):
+ """Various checks for elasticsearch. Returns: error string"""
+ not_running_pods, error_msgs = self._not_running_elasticsearch_pods(es_pods)
+ running_pods = [pod for pod in es_pods if pod not in not_running_pods]
+ pods_by_name = {
+ pod['metadata']['name']: pod for pod in running_pods
+ # Filter out pods that are not members of a DC
+ if pod['metadata'].get('labels', {}).get('deploymentconfig')
+ }
+ if not pods_by_name:
+ return 'No logging Elasticsearch pods were found. Is logging deployed?'
+ error_msgs += self._check_elasticsearch_masters(pods_by_name, task_vars)
+ error_msgs += self._check_elasticsearch_node_list(pods_by_name, task_vars)
+ error_msgs += self._check_es_cluster_health(pods_by_name, task_vars)
+ error_msgs += self._check_elasticsearch_diskspace(pods_by_name, task_vars)
+ return '\n'.join(error_msgs)
+
+ @staticmethod
+ def _build_es_curl_cmd(pod_name, url):
+ base = "exec {name} -- curl -s --cert {base}cert --key {base}key --cacert {base}ca -XGET '{url}'"
+ return base.format(base="/etc/elasticsearch/secret/admin-", name=pod_name, url=url)
+
+ def _check_elasticsearch_masters(self, pods_by_name, task_vars):
+ """Check that Elasticsearch masters are sane. Returns: list of error strings"""
+ es_master_names = set()
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ # Compare what each ES node reports as master and compare for split brain
+ get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
+ master_name_str = self._exec_oc(get_master_cmd, [], task_vars)
+ master_names = (master_name_str or '').split(' ')
+ if len(master_names) > 1:
+ es_master_names.add(master_names[1])
+ else:
+ error_msgs.append(
+                    'No master? Elasticsearch {pod} returned an unexpected string when asked for its master name:\n'
+ ' {response}'.format(pod=pod_name, response=master_name_str)
+ )
+
+ if not es_master_names:
+ error_msgs.append('No logging Elasticsearch masters were found. Is logging deployed?')
+ return '\n'.join(error_msgs)
+
+ if len(es_master_names) > 1:
+ error_msgs.append(
+ 'Found multiple Elasticsearch masters according to the pods:\n'
+ '{master_list}\n'
+ 'This implies that the masters have "split brain" and are not correctly\n'
+ 'replicating data for the logging cluster. Log loss is likely to occur.'
+ .format(master_list='\n'.join(' ' + master for master in es_master_names))
+ )
+
+ return error_msgs
+
+ def _check_elasticsearch_node_list(self, pods_by_name, task_vars):
+ """Check that reported ES masters are accounted for by pods. Returns: list of error strings"""
+
+ if not pods_by_name:
+ return ['No logging Elasticsearch masters were found. Is logging deployed?']
+
+ # get ES cluster nodes
+ node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
+ cluster_node_data = self._exec_oc(node_cmd, [], task_vars)
+ try:
+ cluster_nodes = json.loads(cluster_node_data)['nodes']
+ except (ValueError, KeyError):
+ return [
+ 'Failed to query Elasticsearch for the list of ES nodes. The output was:\n' +
+ cluster_node_data
+ ]
+
+ # Try to match all ES-reported node hosts to known pods.
+ error_msgs = []
+ for node in cluster_nodes.values():
+ # Note that with 1.4/3.4 the pod IP may be used as the master name
+ if not any(node['host'] in (pod_name, pod['status'].get('podIP'))
+ for pod_name, pod in pods_by_name.items()):
+ error_msgs.append(
+ 'The Elasticsearch cluster reports a member node "{node}"\n'
+ 'that does not correspond to any known ES pod.'.format(node=node['host'])
+ )
+
+ return error_msgs
+
+ def _check_es_cluster_health(self, pods_by_name, task_vars):
+ """Exec into the elasticsearch pods and check the cluster health. Returns: list of errors"""
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
+ cluster_health_data = self._exec_oc(cluster_health_cmd, [], task_vars)
+ try:
+ health_res = json.loads(cluster_health_data)
+ if not health_res or not health_res.get('status'):
+ raise ValueError()
+ except ValueError:
+ error_msgs.append(
+ 'Could not retrieve cluster health status from logging ES pod "{pod}".\n'
+ 'Response was:\n{output}'.format(pod=pod_name, output=cluster_health_data)
+ )
+ continue
+
+ if health_res['status'] not in ['green', 'yellow']:
+ error_msgs.append(
+ 'Elasticsearch cluster health status is RED according to pod "{}"'.format(pod_name)
+ )
+
+ return error_msgs
+
+ def _check_elasticsearch_diskspace(self, pods_by_name, task_vars):
+ """
+        Exec into each ES pod and query the disk space used on the persistent volume.
+ Returns: list of errors
+ """
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
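+            # df --output=ipcent,pcent prints only the inode and disk usage percentages for the mount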
+ df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
+ disk_output = self._exec_oc(df_cmd, [], task_vars)
+ lines = disk_output.splitlines()
+ # expecting one header looking like 'IUse% Use%' and one body line
+ body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
+ if len(lines) != 2 or len(lines[0].split()) != 2 or not re.match(body_re, lines[1]):
+ error_msgs.append(
+ 'Could not retrieve storage usage from logging ES pod "{pod}".\n'
+ 'Response to `df` command was:\n{output}'.format(pod=pod_name, output=disk_output)
+ )
+ continue
+ inode_pct, disk_pct = re.match(body_re, lines[1]).groups()
+
+ inode_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_inode_pct', default='90')
+ if int(inode_pct) >= int(inode_pct_thresh):
+ error_msgs.append(
+ 'Inode percent usage on the storage volume for logging ES pod "{pod}"\n'
+                    ' is {pct}, at or above the threshold of {limit}.\n'
+ ' Note: threshold can be specified in inventory with {param}'.format(
+ pod=pod_name,
+ pct=str(inode_pct),
+ limit=str(inode_pct_thresh),
+ param='openshift_check_efk_es_inode_pct',
+ ))
+ disk_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_storage_pct', default='80')
+ if int(disk_pct) >= int(disk_pct_thresh):
+ error_msgs.append(
+ 'Disk percent usage on the storage volume for logging ES pod "{pod}"\n'
+                    ' is {pct}, at or above the threshold of {limit}.\n'
+ ' Note: threshold can be specified in inventory with {param}'.format(
+ pod=pod_name,
+ pct=str(disk_pct),
+ limit=str(disk_pct_thresh),
+ param='openshift_check_efk_es_storage_pct',
+ ))
+
+ return error_msgs
+
+ def _exec_oc(self, cmd_str, extra_args, task_vars):
+ return super(Elasticsearch, self).exec_oc(
+ self.execute_module,
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ task_vars,
+ )
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
new file mode 100644
index 000000000..627567293
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
@@ -0,0 +1,170 @@
+"""
+Module for performing checks on a Fluentd logging deployment
+"""
+
+import json
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Fluentd(LoggingCheck):
+ """Module that checks an integrated logging Fluentd deployment"""
+ name = "fluentd"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ fluentd_pods, error = super(Fluentd, self).get_pods_for_component(
+ self.execute_module,
+ self.logging_namespace,
+ "fluentd",
+ task_vars,
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_fluentd(fluentd_pods, task_vars)
+
+ if check_error:
+ msg = ("The following Fluentd deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Fluentd deployment.'}
+
+ @staticmethod
+ def _filter_fluentd_labeled_nodes(nodes_by_name, node_selector):
+ """Filter to all nodes with fluentd label. Returns dict(name: node), error string"""
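+        # node_selector has the form "label=value", e.g. "logging-infra-fluentd=true"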
+ label, value = node_selector.split('=', 1)
+ fluentd_nodes = {
+ name: node for name, node in nodes_by_name.items()
+ if node['metadata']['labels'].get(label) == value
+ }
+ if not fluentd_nodes:
+ return None, (
+ 'There are no nodes with the fluentd label {label}.\n'
+ 'This means no logs will be aggregated from the nodes.'
+ ).format(label=node_selector)
+ return fluentd_nodes, None
+
+ @staticmethod
+ def _check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars):
+ """Note if nodes are not labeled as expected. Returns: error string"""
+ intended_nodes = get_var(task_vars, 'openshift_logging_fluentd_hosts', default=['--all'])
+ if not intended_nodes or '--all' in intended_nodes:
+ intended_nodes = nodes_by_name.keys()
+ nodes_missing_labels = set(intended_nodes) - set(fluentd_nodes.keys())
+ if nodes_missing_labels:
+ return (
+ 'The following nodes are supposed to be labeled with {label} but are not:\n'
+ ' {nodes}\n'
+ 'Fluentd will not aggregate logs from these nodes.'
+ ).format(label=node_selector, nodes=', '.join(nodes_missing_labels))
+ return None
+
+ @staticmethod
+ def _check_nodes_have_fluentd(pods, fluentd_nodes):
+ """Make sure fluentd is on all the labeled nodes. Returns: error string"""
+ unmatched_nodes = fluentd_nodes.copy()
+ node_names_by_label = {
+ node['metadata']['labels']['kubernetes.io/hostname']: name
+ for name, node in fluentd_nodes.items()
+ }
+ node_names_by_internal_ip = {
+ address['address']: name
+ for name, node in fluentd_nodes.items()
+ for address in node['status']['addresses']
+ if address['type'] == "InternalIP"
+ }
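+        # Match each pod to a labeled node by node name, by InternalIP, or by the kubernetes.io/hostname label.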
+ for pod in pods:
+ for name in [
+ pod['spec']['nodeName'],
+ node_names_by_internal_ip.get(pod['spec']['nodeName']),
+ node_names_by_label.get(pod.get('spec', {}).get('host')),
+ ]:
+ unmatched_nodes.pop(name, None)
+ if unmatched_nodes:
+ return (
+ 'The following nodes are supposed to have a Fluentd pod but do not:\n'
+ '{nodes}'
+ 'These nodes will not have their logs aggregated.'
+ ).format(nodes=''.join(
+ " {}\n".format(name)
+ for name in unmatched_nodes.keys()
+ ))
+ return None
+
+ def _check_fluentd_pods_running(self, pods):
+ """Make sure all fluentd pods are running. Returns: error string"""
+ not_running = super(Fluentd, self).not_running_pods(pods)
+ if not_running:
+ return (
+ 'The following Fluentd pods are supposed to be running but are not:\n'
+ '{pods}'
+ 'These pods will not aggregate logs from their nodes.'
+ ).format(pods=''.join(
+ " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
+ for pod in not_running
+ ))
+ return None
+
+ def check_fluentd(self, pods, task_vars):
+ """Verify fluentd is running everywhere. Returns: error string"""
+
+ node_selector = get_var(task_vars, 'openshift_logging_fluentd_nodeselector',
+ default='logging-infra-fluentd=true')
+
+ nodes_by_name, error = self.get_nodes_by_name(task_vars)
+
+ if error:
+ return error
+ fluentd_nodes, error = self._filter_fluentd_labeled_nodes(nodes_by_name, node_selector)
+ if error:
+ return error
+
+ error_msgs = []
+ error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars)
+ if error:
+ error_msgs.append(error)
+ error = self._check_nodes_have_fluentd(pods, fluentd_nodes)
+ if error:
+ error_msgs.append(error)
+ error = self._check_fluentd_pods_running(pods)
+ if error:
+ error_msgs.append(error)
+
+ # Make sure there are no extra fluentd pods
+ if len(pods) > len(fluentd_nodes):
+ error_msgs.append(
+ 'There are more Fluentd pods running than nodes labeled.\n'
+                'This may not cause problems with logging but it likely indicates that something is wrong.'
+ )
+
+ return '\n'.join(error_msgs)
+
+ def get_nodes_by_name(self, task_vars):
+ """Retrieve all the node definitions. Returns: dict(name: node), error string"""
+ nodes_json = self._exec_oc("get nodes -o json", [], task_vars)
+ try:
+ nodes = json.loads(nodes_json)
+ except ValueError: # no valid json - should not happen
+ return None, "Could not obtain a list of nodes to validate fluentd. Output from oc get:\n" + nodes_json
+ if not nodes or not nodes.get('items'): # also should not happen
+ return None, "No nodes appear to be defined according to the API."
+ return {
+ node['metadata']['name']: node
+ for node in nodes['items']
+ }, None
+
+ def _exec_oc(self, cmd_str, extra_args, task_vars):
+ return super(Fluentd, self).exec_oc(self.execute_module,
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ task_vars)
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
new file mode 100644
index 000000000..442f407b1
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -0,0 +1,229 @@
+"""
+Module for performing checks on a Kibana logging deployment
+"""
+
+import json
+import ssl
+
+try:
+ from urllib2 import HTTPError, URLError
+ import urllib2
+except ImportError:
+ from urllib.error import HTTPError, URLError
+ import urllib.request as urllib2
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Kibana(LoggingCheck):
+ """Module that checks an integrated logging Kibana deployment"""
+
+ name = "kibana"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ kibana_pods, error = super(Kibana, self).get_pods_for_component(
+ self.execute_module,
+ self.logging_namespace,
+ "kibana",
+ task_vars,
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_kibana(kibana_pods)
+
+ if not check_error:
+ check_error = self._check_kibana_route(task_vars)
+
+ if check_error:
+ msg = ("The following Kibana deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Kibana deployment.'}
+
+ def _verify_url_internal(self, url, task_vars):
+ """
+        Try to reach a URL from the target host.
+        Returns: error string on failure, None on success.
+ """
+ args = dict(
+ url=url,
+ follow_redirects='none',
+ validate_certs='no', # likely to be signed with internal CA
+ # TODO(lmeyer): give users option to validate certs
+ status_code=302,
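+            # an unauthenticated request is expected to be redirected (302), e.g. to a login page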
+ )
+ result = self.execute_module('uri', args, task_vars)
+ if result.get('failed'):
+ return result['msg']
+ return None
+
+ @staticmethod
+ def _verify_url_external(url):
+ """
+        Try to reach a URL from the Ansible control host.
+        Returns: error string on failure, None on success.
+ """
+ # This actually checks from the ansible control host, which may or may not
+ # really be "external" to the cluster.
+
+ # Disable SSL cert validation to work around internally signed certs
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False # or setting CERT_NONE is refused
+ ctx.verify_mode = ssl.CERT_NONE
+
+ # Verify that the url is returning a valid response
+ try:
+ # We only care if the url connects and responds
+ return_code = urllib2.urlopen(url, context=ctx).getcode()
+ except HTTPError as httperr:
+ return httperr.reason
+ except URLError as urlerr:
+ return str(urlerr)
+
+ # there appears to be no way to prevent urlopen from following redirects
+ if return_code != 200:
+ return 'Expected success (200) but got return code {}'.format(int(return_code))
+
+ return None
+
+ def check_kibana(self, pods):
+ """Check to see if Kibana is up and working. Returns: error string."""
+
+ if not pods:
+ return "There are no Kibana pods deployed, so no access to the logging UI."
+
+ not_running = self.not_running_pods(pods)
+ if len(not_running) == len(pods):
+ return "No Kibana pod is in a running state, so there is no access to the logging UI."
+ elif not_running:
+ return (
+ "The following Kibana pods are not currently in a running state:\n"
+ "{pods}"
+                "However, at least one is, so service may not be impacted."
+ ).format(pods="".join(" " + pod['metadata']['name'] + "\n" for pod in not_running))
+
+ return None
+
+ def _get_kibana_url(self, task_vars):
+ """
+ Get kibana route or report error.
+ Returns: url (or empty), reason for failure
+ """
+
+ # Get logging url
+ get_route = self._exec_oc("get route logging-kibana -o json", [], task_vars)
+ if not get_route:
+ return None, 'no_route_exists'
+
+ route = json.loads(get_route)
+
+ # check that the route has been accepted by a router
+ ingress = route["status"]["ingress"]
+ # ingress can be null if there is no router, or empty if not routed
+ if not ingress or not ingress[0]:
+ return None, 'route_not_accepted'
+
+ host = route.get("spec", {}).get("host")
+ if not host:
+ return None, 'route_missing_host'
+
+ return 'https://{}/'.format(host), None
+
+ def _check_kibana_route(self, task_vars):
+ """
+ Check to see if kibana route is up and working.
+ Returns: error string
+ """
+ known_errors = dict(
+ no_route_exists=(
+ 'No route is defined for Kibana in the logging namespace,\n'
+ 'so the logging stack is not accessible. Is logging deployed?\n'
+ 'Did something remove the logging-kibana route?'
+ ),
+ route_not_accepted=(
+ 'The logging-kibana route is not being routed by any router.\n'
+ 'Is the router deployed and working?'
+ ),
+ route_missing_host=(
+ 'The logging-kibana route has no hostname defined,\n'
+ 'which should never happen. Did something alter its definition?'
+ ),
+ )
+
+ kibana_url, error = self._get_kibana_url(task_vars)
+ if not kibana_url:
+ return known_errors.get(error, error)
+
+ # first, check that kibana is reachable from the master.
+ error = self._verify_url_internal(kibana_url, task_vars)
+ if error:
+ if 'urlopen error [Errno 111] Connection refused' in error:
+ error = (
+ 'Failed to connect from this master to Kibana URL {url}\n'
+ 'Is kibana running, and is at least one router routing to it?'
+ ).format(url=kibana_url)
+ elif 'urlopen error [Errno -2] Name or service not known' in error:
+ error = (
+ 'Failed to connect from this master to Kibana URL {url}\n'
+ 'because the hostname does not resolve.\n'
+ 'Is DNS configured for the Kibana hostname?'
+ ).format(url=kibana_url)
+ elif 'Status code was not' in error:
+ error = (
+ 'A request from this master to the Kibana URL {url}\n'
+ 'did not return the correct status code (302).\n'
+ 'This could mean that Kibana is malfunctioning, the hostname is\n'
+ 'resolving incorrectly, or other network issues. The output was:\n'
+ ' {error}'
+ ).format(url=kibana_url, error=error)
+ return 'Error validating the logging Kibana route:\n' + error
+
+ # in production we would like the kibana route to work from outside the
+ # cluster too; but that may not be the case, so allow disabling just this part.
+ if not get_var(task_vars, "openshift_check_efk_kibana_external", default=True):
+ return None
+ error = self._verify_url_external(kibana_url)
+ if error:
+ if 'urlopen error [Errno 111] Connection refused' in error:
+ error = (
+ 'Failed to connect from the Ansible control host to Kibana URL {url}\n'
+ 'Is the router for the Kibana hostname exposed externally?'
+ ).format(url=kibana_url)
+ elif 'urlopen error [Errno -2] Name or service not known' in error:
+ error = (
+ 'Failed to resolve the Kibana hostname in {url}\n'
+ 'from the Ansible control host.\n'
+ 'Is DNS configured to resolve this Kibana hostname externally?'
+ ).format(url=kibana_url)
+ elif 'Expected success (200)' in error:
+ error = (
+ 'A request to Kibana at {url}\n'
+ 'returned the wrong error code:\n'
+ ' {error}\n'
+ 'This could mean that Kibana is malfunctioning, the hostname is\n'
+ 'resolving incorrectly, or other network issues.'
+ ).format(url=kibana_url, error=error)
+ error = (
+ 'Error validating the logging Kibana route:\n{error}\n'
+ 'To disable external Kibana route validation, set in your inventory:\n'
+ ' openshift_check_efk_kibana_external=False'
+ ).format(error=error)
+ return error
+ return None
+
+ def _exec_oc(self, cmd_str, extra_args, task_vars):
+ return super(Kibana, self).exec_oc(self.execute_module,
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ task_vars)
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
new file mode 100644
index 000000000..05b4d300c
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -0,0 +1,96 @@
+"""
+Util functions for performing checks on an Elasticsearch, Fluentd, and Kibana stack
+"""
+
+import json
+import os
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+
+
+class LoggingCheck(OpenShiftCheck):
+ """Base class for logging component checks"""
+
+ name = "logging"
+
+ @classmethod
+ def is_active(cls, task_vars):
+ return super(LoggingCheck, cls).is_active(task_vars) and cls.is_first_master(task_vars)
+
+ @staticmethod
+ def is_first_master(task_vars):
+ """Run only on first master and only when logging is configured. Returns: bool"""
+ logging_deployed = get_var(task_vars, "openshift_hosted_logging_deploy", default=True)
+ # Note: It would be nice to use membership in oo_first_master group, however for now it
+ # seems best to avoid requiring that setup and just check this is the first master.
+ hostname = get_var(task_vars, "ansible_ssh_host") or [None]
+ masters = get_var(task_vars, "groups", "masters", default=None) or [None]
+ return logging_deployed and masters[0] == hostname
+
+ def run(self, tmp, task_vars):
+ pass
+
+ def get_pods_for_component(self, execute_module, namespace, logging_component, task_vars):
+ """Get all pods for a given component. Returns: list of pods for component, error string"""
+ pod_output = self.exec_oc(
+ execute_module,
+ namespace,
+ "get pods -l component={} -o json".format(logging_component),
+ [],
+ task_vars
+ )
+ try:
+ pods = json.loads(pod_output)
+ if not pods or not pods.get('items'):
+ raise ValueError()
+ except ValueError:
+            # a successful run with unparseable output generally means there were no pods in the namespace
+ return None, 'There are no pods in the {} namespace. Is logging deployed?'.format(namespace)
+
+ return pods['items'], None
+
+ @staticmethod
+ def not_running_pods(pods):
+ """Returns: list of pods not in a ready and running state"""
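+        # A pod only counts as running if every container is ready and the pod's 'Ready' condition is 'True'.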
+ return [
+ pod for pod in pods
+ if any(
+ container['ready'] is False
+ for container in pod['status']['containerStatuses']
+ ) or not any(
+ condition['type'] == 'Ready' and condition['status'] == 'True'
+ for condition in pod['status']['conditions']
+ )
+ ]
+
+ @staticmethod
+ def exec_oc(execute_module=None, namespace="logging", cmd_str="", extra_args=None, task_vars=None):
+ """
+        Execute an 'oc' command on the remote host in the given namespace.
+        Returns: the command output as a string,
+        or raises OpenShiftCheckException on error
+ """
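+        # ocutil is a small wrapper module that runs `oc` on the host using the cluster admin kubeconfig.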
+ config_base = get_var(task_vars, "openshift", "common", "config_base")
+ args = {
+ "namespace": namespace,
+ "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
+ "cmd": cmd_str,
+ "extra_args": list(extra_args) if extra_args else [],
+ }
+
+ result = execute_module("ocutil", args, task_vars)
+ if result.get("failed"):
+ msg = (
+ 'Unexpected error using `oc` to validate the logging stack components.\n'
+ 'Error executing `oc {cmd}`:\n'
+ '{error}'
+ ).format(cmd=args['cmd'], error=result['result'])
+
+ if result['result'] == '[Errno 2] No such file or directory':
+ msg = (
+ "This host is supposed to be a master but does not have the `oc` command where expected.\n"
+ "Has an installation been run on this host yet?"
+ )
+ raise OpenShiftCheckException(msg)
+
+ return result.get("result", "")
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 20d160eaf..7f3d78cc4 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,4 +1,3 @@
-# pylint: disable=missing-docstring,too-few-public-methods
"""
Mixin classes meant to be used with subclasses of OpenShiftCheck.
"""
@@ -8,8 +7,49 @@ from openshift_checks import get_var
class NotContainerizedMixin(object):
"""Mixin for checks that are only active when not in containerized mode."""
+ # permanent # pylint: disable=too-few-public-methods
+ # Reason: The mixin is not intended to stand on its own as a class.
@classmethod
def is_active(cls, task_vars):
+ """Only run on non-containerized hosts."""
is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
+
+
+class DockerHostMixin(object):
+ """Mixin for checks that are only active on hosts that require Docker."""
+
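+    # Package names required by the concrete check; subclasses override this with their specific dependencies.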
+ dependencies = []
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Only run on hosts that depend on Docker."""
+ is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+ is_node = "nodes" in get_var(task_vars, "group_names", default=[])
+ return super(DockerHostMixin, cls).is_active(task_vars) and (is_containerized or is_node)
+
+ def ensure_dependencies(self, task_vars):
+ """
+ Ensure that docker-related packages exist, but not on atomic hosts
+        (which cannot install packages but should already have them).
+ Returns: msg, failed, changed
+ """
+ if get_var(task_vars, "openshift", "common", "is_atomic"):
+ return "", False, False
+
+ # NOTE: we would use the "package" module but it's actually an action plugin
+ # and it's not clear how to invoke one of those. This is about the same anyway:
+ pkg_manager = get_var(task_vars, "ansible_pkg_mgr", default="yum")
+ result = self.module_executor(pkg_manager, {"name": self.dependencies, "state": "present"}, task_vars)
+ msg = result.get("msg", "")
+ if result.get("failed"):
+ if "No package matching" in msg:
+ msg = "Ensure that all required dependencies can be installed via `yum`.\n"
+ msg = (
+ "Unable to install required packages on this host:\n"
+ " {deps}\n{msg}"
+ ).format(deps=',\n '.join(self.dependencies), msg=msg)
+ failed = result.get("failed", False) or result.get("rc", 0) != 0
+ changed = result.get("changed", False)
+ return msg, failed, changed
diff --git a/roles/openshift_health_checker/test/curator_test.py b/roles/openshift_health_checker/test/curator_test.py
new file mode 100644
index 000000000..ae108c96e
--- /dev/null
+++ b/roles/openshift_health_checker/test/curator_test.py
@@ -0,0 +1,68 @@
+import pytest
+
+from openshift_checks.logging.curator import Curator
+
+
+def canned_curator(exec_oc=None):
+ """Create a Curator check object with canned exec_oc method"""
+ check = Curator("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+not_running_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": False}],
+ "conditions": [{"status": "False", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ "no Curator pods",
+ ),
+ (
+ [plain_curator_pod],
+ None,
+ ),
+ (
+ [not_running_curator_pod],
+ "not currently in a running state",
+ ),
+ (
+ [plain_curator_pod, plain_curator_pod],
+ "more than one Curator pod",
+ ),
+])
+def test_get_curator_pods(pods, expect_error):
+ check = canned_curator()
+ error = check.check_curator(pods)
+ assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 0379cafb5..197c65f51 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,19 +3,25 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
-@pytest.mark.parametrize('deployment_type,is_active', [
- ("origin", True),
- ("openshift-enterprise", True),
- ("enterprise", False),
- ("online", False),
- ("invalid", False),
- ("", False),
+@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+ ("origin", True, [], True),
+ ("openshift-enterprise", True, [], True),
+ ("enterprise", True, [], False),
+ ("online", True, [], False),
+ ("invalid", True, [], False),
+ ("", True, [], False),
+ ("origin", False, [], False),
+ ("openshift-enterprise", False, [], False),
+ ("origin", False, ["nodes", "masters"], True),
+ ("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_active):
+def test_is_active(deployment_type, is_containerized, group_names, expect_active):
task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
openshift_deployment_type=deployment_type,
+ group_names=group_names,
)
- assert DockerImageAvailability.is_active(task_vars=task_vars) == is_active
+ assert DockerImageAvailability.is_active(task_vars=task_vars) == expect_active
@pytest.mark.parametrize("is_containerized,is_atomic", [
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
new file mode 100644
index 000000000..292a323db
--- /dev/null
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -0,0 +1,224 @@
+import pytest
+
+from openshift_checks import OpenShiftCheckException
+from openshift_checks.docker_storage import DockerStorage
+
+
+def dummy_check(execute_module=None):
+ def dummy_exec(self, status, task_vars):
+ raise Exception("dummy executor called")
+ return DockerStorage(execute_module=execute_module or dummy_exec)
+
+
+@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+ (False, ["masters", "etcd"], False),
+ (False, ["masters", "nodes"], True),
+ (True, ["etcd"], True),
+])
+def test_is_active(is_containerized, group_names, is_active):
+ task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ group_names=group_names,
+ )
+ assert DockerStorage.is_active(task_vars=task_vars) == is_active
+
+
+non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
+
+
+@pytest.mark.parametrize('docker_info, failed, expect_msg', [
+ (
+ dict(failed=True, msg="Error connecting: Error while fetching server API version"),
+ True,
+ ["Is docker running on this host?"],
+ ),
+ (
+ dict(msg="I have no info"),
+ True,
+ ["missing info"],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Pool Name", "docker-docker--pool")],
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Data loop file", "true")],
+ }),
+ True,
+ ["loopback devices with the Docker devicemapper storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay2",
+ "DriverStatus": []
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "unsupported",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+])
+def test_check_storage_driver(docker_info, failed, expect_msg):
+ def execute_module(module_name, args, tmp=None, task_vars=None):
+ if module_name == "yum":
+ return {}
+ if module_name != "docker_info":
+ raise ValueError("not expecting module " + module_name)
+ return docker_info
+
+ check = dummy_check(execute_module=execute_module)
+ check._check_dm_usage = lambda status, task_vars: dict() # stub out for this test
+ result = check.run(tmp=None, task_vars=non_atomic_task_vars)
+
+ if failed:
+ assert result["failed"]
+ else:
+ assert not result.get("failed", False)
+
+ for word in expect_msg:
+ assert word in result["msg"]
+
+
+enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "19.92 MB",
+ "Data Space Total": "8.535 GB",
+ "Metadata Space Used": "40.96 kB",
+ "Metadata Space Total": "25.17 MB",
+}
+
+not_enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "10 GB",
+ "Data Space Total": "10 GB",
+ "Metadata Space Used": "42 kB",
+ "Metadata Space Total": "43 kB",
+}
+
+
+@pytest.mark.parametrize('task_vars, driver_status, vg_free, success, expect_msg', [
+ (
+ {"max_thinpool_data_usage_percent": "not a float"},
+ enough_space,
+ "12g",
+ False,
+ ["is not a percentage"],
+ ),
+ (
+ {},
+ {}, # empty values from driver status
+ "bogus", # also does not parse as bytes
+ False,
+ ["Could not interpret", "as bytes"],
+ ),
+ (
+ {},
+ enough_space,
+ "12.00g",
+ True,
+ [],
+ ),
+ (
+ {},
+ not_enough_space,
+ "0.00",
+ False,
+ ["data usage", "metadata usage", "higher than threshold"],
+ ),
+])
+def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
+ check = dummy_check()
+ check._get_vg_free = lambda pool, task_vars: vg_free
+ result = check._check_dm_usage(driver_status, task_vars)
+ result_success = not result.get("failed")
+
+ assert result_success is success
+ for msg in expect_msg:
+ assert msg in result["msg"]
+
+
+@pytest.mark.parametrize('pool, command_returns, raises, returns', [
+ (
+ "foo-bar",
+ { # vgs missing
+ "msg": "[Errno 2] No such file or directory",
+ "failed": True,
+ "cmd": "/sbin/vgs",
+ "rc": 2,
+ },
+ "Failed to run /sbin/vgs",
+ None,
+ ),
+ (
+ "foo", # no hyphen in name - should not happen
+ {},
+ "name does not have the expected format",
+ None,
+ ),
+ (
+ "foo-bar",
+ dict(stdout=" 4.00g\n"),
+ None,
+ "4.00g",
+ ),
+ (
+ "foo-bar",
+ dict(stdout="\n"), # no matching VG
+ "vgs did not find this VG",
+ None,
+ )
+])
+def test_vg_free(pool, command_returns, raises, returns):
+ def execute_module(module_name, args, tmp=None, task_vars=None):
+ if module_name != "command":
+ raise ValueError("not expecting module " + module_name)
+ return command_returns
+
+ check = dummy_check(execute_module=execute_module)
+ if raises:
+ with pytest.raises(OpenShiftCheckException) as err:
+ check._get_vg_free(pool, {})
+ assert raises in str(err.value)
+ else:
+ ret = check._get_vg_free(pool, {})
+ assert ret == returns
+
+
+@pytest.mark.parametrize('string, expect_bytes', [
+ ("12", 12.0),
+ ("12 k", 12.0 * 1024),
+ ("42.42 MB", 42.42 * 1024**2),
+ ("12g", 12.0 * 1024**3),
+])
+def test_convert_to_bytes(string, expect_bytes):
+ got = DockerStorage._convert_to_bytes(string)
+ assert got == expect_bytes
+
+
+@pytest.mark.parametrize('string', [
+ "bork",
+ "42 Qs",
+])
+def test_convert_to_bytes_error(string):
+ with pytest.raises(ValueError) as err:
+ DockerStorage._convert_to_bytes(string)
+ assert "Cannot convert" in str(err.value)
+ assert string in str(err.value)
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
new file mode 100644
index 000000000..b9d375d8c
--- /dev/null
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -0,0 +1,180 @@
+import pytest
+import json
+
+from openshift_checks.logging.elasticsearch import Elasticsearch
+
+task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
+
+
+def canned_elasticsearch(exec_oc=None):
+ """Create an Elasticsearch check object with canned exec_oc method"""
+ check = Elasticsearch("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es"},
+ "name": "logging-es",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es",
+}
+
+split_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es-2"},
+ "name": "logging-es-2",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es-2",
+}
+
+
+def test_check_elasticsearch():
+ assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([], {})
+
+ # canned oc responses to match so all the checks pass
+ def _exec_oc(cmd, args, task_vars):
+ if '_cat/master' in cmd:
+ return 'name logging-es'
+ elif '/_nodes' in cmd:
+ return json.dumps(es_node_list)
+ elif '_cluster/health' in cmd:
+ return '{"status": "green"}'
+ elif ' df ' in cmd:
+ return 'IUse% Use%\n 3% 4%\n'
+ else:
+ raise Exception(cmd)
+
+ assert not canned_elasticsearch(_exec_oc).check_elasticsearch([plain_es_pod], {})
+
+
+def pods_by_name(pods):
+ return {pod['metadata']['name']: pod for pod in pods}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ 'No logging Elasticsearch masters',
+ ),
+ (
+ [plain_es_pod],
+ None,
+ ),
+ (
+ [plain_es_pod, split_es_pod],
+ 'Found multiple Elasticsearch masters',
+ ),
+])
+def test_check_elasticsearch_masters(pods, expect_error):
+ test_pods = list(pods)
+ check = canned_elasticsearch(lambda cmd, args, task_vars: test_pods.pop(0)['_test_master_name_str'])
+
+ errors = check._check_elasticsearch_masters(pods_by_name(pods), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
+
+
+es_node_list = {
+ 'nodes': {
+ 'random-es-name': {
+ 'host': 'logging-es',
+ }}}
+
+
+@pytest.mark.parametrize('pods, node_list, expect_error', [
+ (
+ [],
+ {},
+ 'No logging Elasticsearch masters',
+ ),
+ (
+ [plain_es_pod],
+ es_node_list,
+ None,
+ ),
+ (
+ [plain_es_pod],
+ {}, # empty list of nodes triggers KeyError
+ "Failed to query",
+ ),
+ (
+ [split_es_pod],
+ es_node_list,
+ 'does not correspond to any known ES pod',
+ ),
+])
+def test_check_elasticsearch_node_list(pods, node_list, expect_error):
+ check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(node_list))
+
+ errors = check._check_elasticsearch_node_list(pods_by_name(pods), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
+
+
+@pytest.mark.parametrize('pods, health_data, expect_error', [
+ (
+ [plain_es_pod],
+ [{"status": "green"}],
+ None,
+ ),
+ (
+ [plain_es_pod],
+ [{"no-status": "should bomb"}],
+ 'Could not retrieve cluster health status',
+ ),
+ (
+ [plain_es_pod, split_es_pod],
+ [{"status": "green"}, {"status": "red"}],
+ 'Elasticsearch cluster health status is RED',
+ ),
+])
+def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
+ test_health_data = list(health_data)
+ check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(test_health_data.pop(0)))
+
+ errors = check._check_es_cluster_health(pods_by_name(pods), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
+
+
+@pytest.mark.parametrize('disk_data, expect_error', [
+ (
+ 'df: /elasticsearch/persistent: No such file or directory\n',
+ 'Could not retrieve storage usage',
+ ),
+ (
+ 'IUse% Use%\n 3% 4%\n',
+ None,
+ ),
+ (
+ 'IUse% Use%\n 95% 40%\n',
+ 'Inode percent usage on the storage volume',
+ ),
+ (
+ 'IUse% Use%\n 3% 94%\n',
+ 'Disk percent usage on the storage volume',
+ ),
+])
+def test_check_elasticsearch_diskspace(disk_data, expect_error):
+ check = canned_elasticsearch(lambda cmd, args, task_vars: disk_data)
+
+ errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
diff --git a/roles/openshift_health_checker/test/fluentd_test.py b/roles/openshift_health_checker/test/fluentd_test.py
new file mode 100644
index 000000000..d151c0b19
--- /dev/null
+++ b/roles/openshift_health_checker/test/fluentd_test.py
@@ -0,0 +1,109 @@
+import pytest
+import json
+
+from openshift_checks.logging.fluentd import Fluentd
+
+
+def canned_fluentd(exec_oc=None):
+ """Create a Fluentd check object with canned exec_oc method"""
+ check = Fluentd("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+fluentd_pod_node1 = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {"host": "node1", "nodeName": "node1"},
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+fluentd_pod_node2_down = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-2",
+ },
+ "spec": {"host": "node2", "nodeName": "node2"},
+ "status": {
+ "containerStatuses": [{"ready": False}],
+ "conditions": [{"status": "False", "type": "Ready"}],
+ }
+}
+fluentd_node1 = {
+ "metadata": {
+ "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "node1"},
+ "name": "node1",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.1"}]},
+}
+fluentd_node2 = {
+ "metadata": {
+ "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "hostname"},
+ "name": "node2",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.2"}]},
+}
+fluentd_node3_unlabeled = {
+ "metadata": {
+ "labels": {"kubernetes.io/hostname": "hostname"},
+ "name": "node3",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.3"}]},
+}
+
+
+@pytest.mark.parametrize('pods, nodes, expect_error', [
+ (
+ [],
+ [],
+ 'No nodes appear to be defined',
+ ),
+ (
+ [],
+ [fluentd_node3_unlabeled],
+ 'There are no nodes with the fluentd label',
+ ),
+ (
+ [],
+ [fluentd_node1, fluentd_node3_unlabeled],
+ 'Fluentd will not aggregate logs from these nodes.',
+ ),
+ (
+ [],
+ [fluentd_node2],
+ "nodes are supposed to have a Fluentd pod but do not",
+ ),
+ (
+ [fluentd_pod_node1, fluentd_pod_node1],
+ [fluentd_node1],
+ 'more Fluentd pods running than nodes labeled',
+ ),
+ (
+ [fluentd_pod_node2_down],
+ [fluentd_node2],
+ "Fluentd pods are supposed to be running",
+ ),
+ (
+ [fluentd_pod_node1],
+ [fluentd_node1],
+ None,
+ ),
+])
+def test_get_fluentd_pods(pods, nodes, expect_error):
+ check = canned_fluentd(lambda cmd, args, task_vars: json.dumps(dict(items=nodes)))
+
+ error = check.check_fluentd(pods, {})
+ assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
new file mode 100644
index 000000000..19140a1b6
--- /dev/null
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -0,0 +1,218 @@
+import pytest
+import json
+
+try:
+ import urllib2
+ from urllib2 import HTTPError, URLError
+except ImportError:
+ from urllib.error import HTTPError, URLError
+ import urllib.request as urllib2
+
+from openshift_checks.logging.kibana import Kibana
+
+
+def canned_kibana(exec_oc=None):
+ """Create a Kibana check object with canned exec_oc method"""
+ check = Kibana("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+not_running_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": False}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ "There are no Kibana pods deployed",
+ ),
+ (
+ [plain_kibana_pod],
+ None,
+ ),
+ (
+ [not_running_kibana_pod],
+ "No Kibana pod is in a running state",
+ ),
+ (
+ [plain_kibana_pod, not_running_kibana_pod],
+ "The following Kibana pods are not currently in a running state",
+ ),
+])
+def test_check_kibana(pods, expect_error):
+ check = canned_kibana()
+ error = check.check_kibana(pods)
+ assert_error(error, expect_error)
+
+
+@pytest.mark.parametrize('route, expect_url, expect_error', [
+ (
+ None,
+ None,
+ 'no_route_exists',
+ ),
+
+ # test route with no ingress
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [],
+ },
+ "spec": {
+ "host": "hostname",
+ }
+ },
+ None,
+ 'route_not_accepted',
+ ),
+
+ # test route with no host
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [{
+ "status": True,
+ }],
+ },
+ "spec": {},
+ },
+ None,
+ 'route_missing_host',
+ ),
+
+ # test route that looks fine
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [{
+ "status": True,
+ }],
+ },
+ "spec": {
+ "host": "hostname",
+ },
+ },
+ "https://hostname/",
+ None,
+ ),
+])
+def test_get_kibana_url(route, expect_url, expect_error):
+ check = canned_kibana(lambda cmd, args, task_vars: json.dumps(route) if route else "")
+
+ url, error = check._get_kibana_url({})
+ if expect_url:
+ assert url == expect_url
+ else:
+ assert not url
+ if expect_error:
+ assert error == expect_error
+ else:
+ assert not error
+
+
+@pytest.mark.parametrize('exec_result, expect', [
+ (
+ 'urlopen error [Errno 111] Connection refused',
+ 'at least one router routing to it?',
+ ),
+ (
+ 'urlopen error [Errno -2] Name or service not known',
+ 'DNS configured for the Kibana hostname?',
+ ),
+ (
+ 'Status code was not [302]: HTTP Error 500: Server error',
+ 'did not return the correct status code',
+ ),
+ (
+ 'bork bork bork',
+ 'bork bork bork', # should pass through
+ ),
+])
+def test_verify_url_internal_failure(exec_result, expect):
+ check = Kibana(execute_module=lambda module_name, args, task_vars: dict(failed=True, msg=exec_result))
+ check._get_kibana_url = lambda task_vars: ('url', None)
+
+ error = check._check_kibana_route({})
+ assert_error(error, expect)
+
+
+@pytest.mark.parametrize('lib_result, expect', [
+ (
+ HTTPError('url', 500, "it broke", hdrs=None, fp=None),
+ 'it broke',
+ ),
+ (
+ URLError('it broke'),
+ 'it broke',
+ ),
+ (
+ 302,
+ 'returned the wrong error code',
+ ),
+ (
+ 200,
+ None,
+ ),
+])
+def test_verify_url_external_failure(lib_result, expect, monkeypatch):
+
+ class _http_return:
+
+ def __init__(self, code):
+ self.code = code
+
+ def getcode(self):
+ return self.code
+
+ def urlopen(url, context):
+ if type(lib_result) is int:
+ return _http_return(lib_result)
+ raise lib_result
+ monkeypatch.setattr(urllib2, 'urlopen', urlopen)
+
+ check = canned_kibana()
+ check._get_kibana_url = lambda task_vars: ('url', None)
+ check._verify_url_internal = lambda url, task_vars: None
+
+ error = check._check_kibana_route({})
+ assert_error(error, expect)
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
new file mode 100644
index 000000000..b6db34fe3
--- /dev/null
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -0,0 +1,137 @@
+import pytest
+import json
+
+from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
+
+task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
+
+
+logging_namespace = "logging"
+
+
+def canned_loggingcheck(exec_oc=None):
+ """Create a LoggingCheck object with canned exec_oc method"""
+ check = LoggingCheck("dummy") # fails if a module is actually invoked
+ check.logging_namespace = 'logging'
+ if exec_oc:
+ check.exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es"},
+ "name": "logging-es",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es",
+}
+
+plain_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+fluentd_pod_node1 = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {"host": "node1", "nodeName": "node1"},
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+plain_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+
+@pytest.mark.parametrize('problem, expect', [
+ ("[Errno 2] No such file or directory", "supposed to be a master"),
+ ("Permission denied", "Unexpected error using `oc`"),
+])
+def test_oc_failure(problem, expect):
+ def execute_module(module_name, args, task_vars):
+ if module_name == "ocutil":
+ return dict(failed=True, result=problem)
+ return dict(changed=False)
+
+ check = LoggingCheck({})
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.exec_oc(execute_module, logging_namespace, 'get foo', [], task_vars=task_vars_config_base)
+ assert expect in str(excinfo)
+
+
+groups_with_first_master = dict(masters=['this-host', 'other-host'])
+groups_with_second_master = dict(masters=['other-host', 'this-host'])
+groups_not_a_master = dict(masters=['other-host'])
+
+
+@pytest.mark.parametrize('groups, logging_deployed, is_active', [
+ (groups_with_first_master, True, True),
+ (groups_with_first_master, False, False),
+ (groups_not_a_master, True, False),
+ (groups_with_second_master, True, False),
+ (groups_not_a_master, True, False),
+])
+def test_is_active(groups, logging_deployed, is_active):
+ task_vars = dict(
+ ansible_ssh_host='this-host',
+ groups=groups,
+ openshift_hosted_logging_deploy=logging_deployed,
+ )
+
+ assert LoggingCheck.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('pod_output, expect_pods, expect_error', [
+ (
+ 'No resources found.',
+ None,
+ 'There are no pods in the logging namespace',
+ ),
+ (
+ json.dumps({'items': [plain_kibana_pod, plain_es_pod, plain_curator_pod, fluentd_pod_node1]}),
+ [plain_es_pod],
+ None,
+ ),
+])
+def test_get_pods_for_component(pod_output, expect_pods, expect_error):
+ check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: pod_output)
+ pods, error = check.get_pods_for_component(
+ lambda name, args, task_vars: {},
+ logging_namespace,
+ "es",
+ {}
+ )
+ assert_error(error, expect_error)
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 3dde83bee..8aaba0f3c 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -1,20 +1,4 @@
---
-- name: Assert supported openshift.hosted.registry.storage.provider
- assert:
- that:
- - openshift.hosted.registry.storage.provider in ['azure_blob', 's3', 'swift']
- msg: >
- Object Storage Provider: "{{ openshift.hosted.registry.storage.provider }}"
- is not currently supported
-
-- name: Assert implemented openshift.hosted.registry.storage.provider
- assert:
- that:
- - openshift.hosted.registry.storage.provider not in ['azure_blob', 'swift']
- msg: >
- Support for provider: "{{ openshift.hosted.registry.storage.provider }}"
- not implemented yet
-
- include: s3.yml
when: openshift.hosted.registry.storage.provider == 's3'
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 573cbdd09..3c343c9dc 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -87,7 +87,7 @@ openshift_logging_es_cpu_limit: null
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: null
+openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default(null) }}"
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
@@ -126,7 +126,7 @@ openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
-openshift_logging_es_ops_pv_selector: None
+openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default(null) }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 0c7152b16..6d023a02d 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -32,9 +32,8 @@
# delete our old secrets
- name: delete logging secrets
- oc_obj:
+ oc_secret:
state: absent
- kind: secret
namespace: "{{ openshift_logging_namespace }}"
name: "{{ item }}"
with_items:
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 040356e3d..9c8f0986a 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -17,7 +17,7 @@
- name: Generate certificates
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
--serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
check_mode: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index dde76b142..7c1062b77 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -10,7 +10,7 @@
name: "{{ openshift_logging_namespace }}"
node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
-- name: Labelling logging project
+- name: Labeling logging project
oc_label:
state: present
kind: namespace
@@ -23,7 +23,7 @@
- openshift_logging_labels is defined
- openshift_logging_labels is dict
-- name: Labelling logging project
+- name: Labeling logging project
oc_label:
state: present
kind: namespace
@@ -78,6 +78,8 @@
- "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}"
- "{{ openshift_logging_facts.elasticsearch.pvcs }}"
- "{{ es_indices }}"
+ when:
+ - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
# Create any new DC that may be required
- include_role:
@@ -124,6 +126,7 @@
- "{{ es_ops_indices }}"
when:
- openshift_logging_use_ops | bool
+ - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
# Create any new DC that may be required
- include_role:
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index 7ab140357..00de0ca06 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -27,7 +27,7 @@
- name: Creating signed server cert and key for {{ cert_info.procure_component }}
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
--signer-serial={{generated_certs_dir}}/ca.serial.txt
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 7e88a7498..f1d15b76d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -217,7 +217,7 @@
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
annotations:
- volume.alpha.kubernetes.io/storage-class: "dynamic"
+ volume.beta.kubernetes.io/storage-class: "dynamic"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- openshift_logging_elasticsearch_pvc_dynamic
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 681f5a7e6..58c325c8a 100644
--- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -38,6 +38,7 @@ gateway:
io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
openshift.config:
use_common_data_model: true
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index e129205ca..bd2289f0d 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -84,6 +84,9 @@ spec:
name: "RECOVER_AFTER_TIME"
value: "{{openshift_logging_elasticsearch_recover_after_time}}"
-
+ name: "READINESS_PROBE_TIMEOUT"
+ value: "30"
+ -
name: "IS_MASTER"
value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
@@ -104,8 +107,8 @@ spec:
exec:
command:
- "/usr/share/elasticsearch/probe/readiness.sh"
- initialDelaySeconds: 5
- timeoutSeconds: 4
+ initialDelaySeconds: 10
+ timeoutSeconds: 30
periodSeconds: 5
volumes:
- name: elasticsearch
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 55b28ee24..bae55ffaa 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -43,6 +43,31 @@
kibana_name: "{{ 'logging-kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
kibana_component: "{{ 'kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+# Check {{ generated_certs_dir }} for session_secret and oauth_secret
+- name: Checking for session_secret
+ stat: path="{{generated_certs_dir}}/session_secret"
+ register: session_secret_file
+
+- name: Checking for oauth_secret
+ stat: path="{{generated_certs_dir}}/oauth_secret"
+ register: oauth_secret_file
+
+# gen session_secret if necessary
+- name: Generate session secret
+ copy:
+ content: "{{ 200 | oo_random_word }}"
+ dest: "{{ generated_certs_dir }}/session_secret"
+ when:
+ - not session_secret_file.stat.exists
+
+# gen oauth_secret if necessary
+- name: Generate oauth secret
+ copy:
+ content: "{{ 64 | oo_random_word }}"
+ dest: "{{ generated_certs_dir }}/oauth_secret"
+ when:
+ - not oauth_secret_file.stat.exists
+
- name: Retrieving the cert to use when generating secrets for the logging components
slurp:
src: "{{ generated_certs_dir }}/{{ item.file }}"
@@ -52,6 +77,8 @@
- { name: "kibana_internal_key", file: "kibana-internal.key"}
- { name: "kibana_internal_cert", file: "kibana-internal.crt"}
- { name: "server_tls", file: "server-tls.json"}
+ - { name: "session_secret", file: "session_secret" }
+ - { name: "oauth_secret", file: "oauth_secret" }
# services
- name: Set {{ kibana_name }} service
@@ -120,19 +147,16 @@
files:
- "{{ tempdir }}/templates/kibana-route.yaml"
-# gen session_secret -- if necessary
-# TODO: make idempotent
-- name: Generate proxy session
- set_fact:
- session_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(200) }}"
- check_mode: no
+# preserve list of current hostnames
+- name: Get current oauthclient hostnames
+ oc_obj:
+ state: list
+ name: kibana-proxy
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: oauthclient
+ register: oauth_client_list
-# gen oauth_secret -- if necessary
-# TODO: make idempotent
-- name: Generate oauth client secret
- set_fact:
- oauth_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(64) }}"
- check_mode: no
+- set_fact: proxy_hostnames={{ oauth_client_list.results.results[0].redirectURIs | default ([]) + ['https://' ~ openshift_logging_kibana_hostname] }}
# create oauth client
- name: Create oauth-client template
@@ -140,8 +164,8 @@
src: oauth-client.j2
dest: "{{ tempdir }}/templates/oauth-client.yml"
vars:
- kibana_hostname: "{{ openshift_logging_kibana_hostname }}"
- secret: "{{ oauth_secret }}"
+ kibana_hostnames: "{{ proxy_hostnames | unique }}"
+ secret: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}"
- name: Set kibana-proxy oauth-client
oc_obj:
@@ -179,18 +203,18 @@
# path: "{{ generated_certs_dir }}/kibana-internal.key"
#- name: server-cert
# path: "{{ generated_certs_dir }}/kibana-internal.crt"
- #- name: server-tls
+ #- name: server-tls.json
# path: "{{ generated_certs_dir }}/server-tls.json"
contents:
- path: oauth-secret
- data: "{{ oauth_secret }}"
+ data: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}"
- path: session-secret
- data: "{{ session_secret }}"
+ data: "{{ key_pairs | entry_from_named_pair('session_secret') | b64decode }}"
- path: server-key
data: "{{ key_pairs | entry_from_named_pair('kibana_internal_key') | b64decode }}"
- path: server-cert
data: "{{ key_pairs | entry_from_named_pair('kibana_internal_cert') | b64decode }}"
- - path: server-tls
+ - path: server-tls.json
data: "{{ key_pairs | entry_from_named_pair('server_tls') | b64decode }}"
# create Kibana DC
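
The new tasks above make the session and oauth secrets idempotent by checking for an existing file before generating it. The same check-then-generate pattern in isolation, with a placeholder path, looks roughly like this:

# Distilled sketch of the pattern used above; the path is a placeholder.
- name: Check for an existing secret file
  stat:
    path: /etc/origin/logging/session_secret
  register: secret_file

- name: Generate the secret only when the file is absent
  copy:
    content: "{{ 200 | oo_random_word }}"
    dest: /etc/origin/logging/session_secret
  when: not secret_file.stat.exists
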
diff --git a/roles/openshift_logging_kibana/templates/oauth-client.j2 b/roles/openshift_logging_kibana/templates/oauth-client.j2
index 6767f6d89..c80ff3d30 100644
--- a/roles/openshift_logging_kibana/templates/oauth-client.j2
+++ b/roles/openshift_logging_kibana/templates/oauth-client.j2
@@ -4,9 +4,11 @@ metadata:
name: kibana-proxy
labels:
logging-infra: support
-secret: {{secret}}
+secret: {{ secret }}
redirectURIs:
-- https://{{kibana_hostname}}
+{% for host in kibana_hostnames %}
+- {{ host }}
+{% endfor %}
scopeRestrictions:
- literals:
- user:info
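
With the loop over kibana_hostnames, the rendered OAuthClient carries every preserved redirect URI instead of a single hostname. A rough, hypothetical rendering with placeholder hostnames and an elided secret:

# Hypothetical rendering of oauth-client.j2; hostnames and secret are placeholders.
apiVersion: v1
kind: OAuthClient
metadata:
  name: kibana-proxy
  labels:
    logging-infra: support
secret: <decoded oauth_secret>
redirectURIs:
- https://kibana.example.com
- https://kibana-ops.example.com
scopeRestrictions:
- literals:
  - user:info
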
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 9706da24b..62413536b 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -71,7 +71,7 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
-- name: Generate the master client config
+- name: Generate the loopback master client config
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
@@ -80,8 +80,8 @@
--certificate-authority={{ openshift_ca_cert }}
--client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
--groups=system:masters,system:openshift-master
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
+ --master={{ hostvars[item].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[item].openshift.master.loopback_api_url }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 1d3db8a1a..467db34c8 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -16,6 +16,7 @@ openshift_metrics_hawkular_nodeselector: ""
openshift_metrics_cassandra_replicas: 1
openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
+openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default(null) }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index 7af3f9467..3dc15d58b 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -1,7 +1,7 @@
---
- name: generate ca certificate chain
command: >
- {{ openshift.common.admin_binary }} ca create-signer-cert
+ {{ openshift.common.client_binary }} adm ca create-signer-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/ca.key'
--cert='{{ mktemp.stdout }}/ca.crt'
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 3b4e8560f..62b7f52cb 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -23,7 +23,7 @@
changed_when: false
- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
- when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''
+ when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
- name: generate hawkular-cassandra persistent volume claims
template:
@@ -35,6 +35,7 @@
metrics-infra: hawkular-cassandra
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when:
- openshift_metrics_cassandra_storage_type != 'emptydir'
@@ -50,9 +51,10 @@
labels:
metrics-infra: hawkular-cassandra
annotations:
- volume.alpha.kubernetes.io/storage-class: dynamic
+ volume.beta.kubernetes.io/storage-class: dynamic
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
changed_when: false
diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml
index 199968579..2d880f4d6 100644
--- a/roles/openshift_metrics/tasks/setup_certificate.yaml
+++ b/roles/openshift_metrics/tasks/setup_certificate.yaml
@@ -1,7 +1,7 @@
---
- name: generate {{ component }} keys
command: >
- {{ openshift.common.admin_binary }} ca create-server-cert
+ {{ openshift.common.client_binary }} adm ca create-server-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/{{ component }}.key'
--cert='{{ mktemp.stdout }}/{{ component }}.crt'
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index c2e56ba21..0b801b33f 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -18,6 +18,13 @@ metadata:
{% endfor %}
{% endif %}
spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
accessModes:
{% for mode in access_modes %}
- {{ mode }}
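
When a pv_selector mapping is supplied (for example via openshift_metrics_cassandra_pv_selector above), the block added to pvc.j2 renders a selector alongside the access modes; with an illustrative label it would produce something like:

# Illustrative rendering of the new selector block; the label is a placeholder.
spec:
  selector:
    matchLabels:
      storage: metrics
  accessModes:
  - ReadWriteOnce
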
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 4dcf1eef8..a6bd12d4e 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,6 +1,8 @@
---
- name: restart openvswitch
- systemd: name=openvswitch state=restarted
+ systemd:
+ name: openvswitch
+ state: restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
notify:
- restart openvswitch pause
@@ -10,8 +12,13 @@
when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
- name: restart node
- systemd: name={{ openshift.common.service_type }}-node state=restarted
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
- name: reload sysctl.conf
command: /sbin/sysctl -p
+
+- name: reload systemd units
+ command: systemctl daemon-reload
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index f58c803c4..e3ce5df3d 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -8,6 +8,9 @@
src: openshift.docker.node.dep.service
register: install_node_dep_result
when: openshift.common.is_containerized | bool
+ notify:
+ - reload systemd units
+ - restart node
- block:
- name: Pre-pull node image
@@ -21,6 +24,9 @@
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
src: openshift.docker.node.service
register: install_node_result
+ notify:
+ - reload systemd units
+ - restart node
when:
- openshift.common.is_containerized | bool
- not openshift.common.is_node_system_container | bool
@@ -31,6 +37,9 @@
src: "{{ openshift.common.service_type }}-node.service.j2"
register: install_node_result
when: not openshift.common.is_containerized | bool
+ notify:
+ - reload systemd units
+ - restart node
- name: Create the openvswitch service env file
template:
@@ -39,6 +48,7 @@
when: openshift.common.is_containerized | bool
register: install_ovs_sysconfig
notify:
+ - reload systemd units
- restart openvswitch
- name: Install Node system container
@@ -67,6 +77,7 @@
when: openshift.common.use_openshift_sdn | default(true) | bool
register: install_oom_fix_result
notify:
+ - reload systemd units
- restart openvswitch
- block:
@@ -81,6 +92,7 @@
dest: "/etc/systemd/system/openvswitch.service"
src: openvswitch.docker.service
notify:
+ - reload systemd units
- restart openvswitch
when:
- openshift.common.is_containerized | bool
@@ -119,8 +131,3 @@
when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
notify:
- restart node
-
-- name: Reload systemd units
- command: systemctl daemon-reload
- notify:
- - restart node
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index f397cbbf1..8bae9aaac 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -1,3 +1,5 @@
no-resolv
domain-needed
server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }}
+no-negcache
+max-cache-ttl=1
diff --git a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
index 480e87d58..06a2d16ba 100644
--- a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
@@ -12,3 +12,18 @@
- name: Ensure python-yaml present for config upgrade
package: name=PyYAML state=present
when: not openshift.common.is_atomic | bool
+
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: "{{ openshift.common.service_type }}-node.service.j2"
+ register: l_node_unit
+
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: l_node_unit | changed
diff --git a/roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2 b/roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2
new file mode 120000
index 000000000..6041fb13a
--- /dev/null
+++ b/roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2
@@ -0,0 +1 @@
+../../openshift_node/templates/atomic-openshift-node.service.j2 \ No newline at end of file
diff --git a/roles/openshift_node_upgrade/templates/origin-node.service.j2 b/roles/openshift_node_upgrade/templates/origin-node.service.j2
new file mode 120000
index 000000000..79c45a303
--- /dev/null
+++ b/roles/openshift_node_upgrade/templates/origin-node.service.j2
@@ -0,0 +1 @@
+../../openshift_node/templates/origin-node.service.j2 \ No newline at end of file
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index 877e88002..9c5103597 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -7,6 +7,12 @@ items:
kind: PersistentVolume
metadata:
name: "{{ volume.name }}"
+{% if volume.labels is defined and volume.labels is mapping %}
+ labels:
+{% for key,value in volume.labels.iteritems() %}
+ {{ key }}: {{ value }}
+{% endfor %}
+{% endif %}
spec:
capacity:
storage: "{{ volume.capacity }}"
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 7b310dbf8..62fc35299 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -1,7 +1,31 @@
OpenShift GlusterFS Cluster
===========================
-OpenShift GlusterFS Cluster Installation
+OpenShift GlusterFS Cluster Configuration
+
+This role handles the configuration of GlusterFS clusters. It can handle
+three primary configuration scenarios:
+
+* Configuring a new, natively-hosted GlusterFS cluster. In this scenario,
+ GlusterFS pods are deployed on nodes in the OpenShift cluster which are
+ configured to provide storage.
+* Configuring a new, external GlusterFS cluster. In this scenario, the
+ cluster nodes have the GlusterFS software pre-installed but have not
+ been configured yet. The installer will take care of configuring the
+ cluster(s) for use by OpenShift applications.
+* Using existing GlusterFS clusters. In this scenario, one or more
+  GlusterFS clusters are assumed to be already set up. These clusters can
+ be either natively-hosted or external, but must be managed by a
+ [heketi service](https://github.com/heketi/heketi).
+
+As part of the configuration, a particular GlusterFS cluster may be
+specified to provide backend storage for a natively-hosted Docker
+registry.
+
+Unless configured otherwise, a StorageClass will be automatically
+created for each non-registry GlusterFS cluster. This will allow
+applications which can mount PersistentVolumes to request
+dynamically-provisioned GlusterFS volumes.
Requirements
------------
@@ -21,26 +45,50 @@ hosted Docker registry:
* `[glusterfs_registry]`
+Host Variables
+--------------
+
+For configuring new clusters, the following host variables are available.
+
+Each host in either of the above groups must have the following variable
+defined:
+
+| Name | Default value | Description |
+|-------------------|---------------|-----------------------------------------|
+| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, i.e. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]'
+
+In addition, each host may specify the following variables to further control
+their configuration as GlusterFS nodes:
+
+| Name | Default value | Description |
+|--------------------|---------------------------|-----------------------------------------|
+| glusterfs_cluster | 1 | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
+| glusterfs_hostname | openshift.common.hostname | A hostname (or IP address) that will be used for internal GlusterFS communication
+| glusterfs_ip | openshift.common.ip | An IP address that will be used by pods to communicate with the GlusterFS node
+| glusterfs_zone | 1 | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volume's bricks as evenly as possible across all zones
+
Role Variables
--------------
This role has the following variables that control the integration of a
GlusterFS cluster into a new or existing OpenShift cluster:
-| Name | Default value | |
+| Name | Default value | Description |
|--------------------------------------------------|-------------------------|-----------------------------------------|
| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources
| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
-| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode
+| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
-| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin
-| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as a user that can only view or modify volumes
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
@@ -52,17 +100,24 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | |
-|---------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
-| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters
+| Name | Default value | Description |
+|-------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended not to create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the non-registry heketi admin key above
+| openshift_storage_glusterfs_registry_heketi_user_key  | auto-generated | Separate from the non-registry heketi user key above
Additionally, this role's behavior responds to the following registry-specific
-variable:
-
-| Name | Default value | Description |
-|----------------------------------------------|---------------|------------------------------------------------------------------------------|
-| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+variables:
+
+| Name | Default value | Description |
+|-----------------------------------------------|------------------------------|-----------------------------------------|
+| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
+| openshift_hosted_registry_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage
+| openshift_hosted_registry_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume
+| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
Dependencies
------------
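
As a concrete illustration of the host variables documented above, a natively-hosted cluster of three nodes could be described in a YAML inventory along these lines (hostnames, devices, and zones are placeholders):

# Hypothetical inventory snippet for the [glusterfs] group.
glusterfs:
  hosts:
    node1.example.com:
      glusterfs_devices: [ "/dev/sdb" ]
      glusterfs_zone: 1
    node2.example.com:
      glusterfs_devices: [ "/dev/sdb", "/dev/sdc" ]
      glusterfs_zone: 2
    node3.example.com:
      glusterfs_devices: [ "/dev/sdb" ]
      glusterfs_zone: 3
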
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index ebe9ca30b..468877e57 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -2,7 +2,9 @@
openshift_storage_glusterfs_timeout: 300
openshift_storage_glusterfs_namespace: 'default'
openshift_storage_glusterfs_is_native: True
-openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
+openshift_storage_glusterfs_name: 'storage'
+openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
+openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_wipe: False
@@ -11,8 +13,8 @@ openshift_storage_glusterfs_heketi_is_missing: True
openshift_storage_glusterfs_heketi_deploy_is_missing: True
openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
openshift_storage_glusterfs_heketi_version: 'latest'
-openshift_storage_glusterfs_heketi_admin_key: ''
-openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
openshift_storage_glusterfs_heketi_topology_load: True
openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
openshift_storage_glusterfs_heketi_url: "{{ omit }}"
@@ -20,7 +22,9 @@ openshift_storage_glusterfs_heketi_url: "{{ omit }}"
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
-openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_name: 'registry'
+openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
+openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
@@ -29,8 +33,8 @@ openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_gl
openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
-openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
-openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ 32 | oo_generate_secret }}"
openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index c9945be13..81b4fa5dc 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -9,49 +9,47 @@ metadata:
annotations:
description: Bootstrap Heketi installation
tags: glusterfs,heketi,installation
-labels:
- template: deploy-heketi
objects:
- kind: Service
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-service
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
deploy-heketi: support
annotations:
description: Exposes Heketi service
spec:
ports:
- - name: deploy-heketi
+ - name: deploy-heketi-${CLUSTER_NAME}
port: 8080
targetPort: 8080
selector:
- name: deploy-heketi
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
- kind: Route
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-route
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
deploy-heketi: support
spec:
to:
kind: Service
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
- name: deploy-heketi
+ name: deploy-heketi-${CLUSTER_NAME}
labels:
- glusterfs: deploy-heketi-dc
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
deploy-heketi: support
annotations:
description: Defines how to deploy Heketi
spec:
replicas: 1
selector:
- name: deploy-heketi
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
triggers:
- type: ConfigChange
strategy:
@@ -60,13 +58,12 @@ objects:
metadata:
name: deploy-heketi
labels:
- name: deploy-heketi
- glusterfs: deploy-heketi-pod
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
deploy-heketi: support
spec:
- serviceAccountName: heketi-service-account
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
- - name: deploy-heketi
+ - name: heketi
image: ${IMAGE_NAME}:${IMAGE_VERSION}
env:
- name: HEKETI_USER_KEY
@@ -81,11 +78,15 @@ objects:
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
- name: db
mountPath: /var/lib/heketi
+ - name: topology
+ mountPath: ${TOPOLOGY_PATH}
readinessProbe:
timeoutSeconds: 3
initialDelaySeconds: 3
@@ -100,6 +101,9 @@ objects:
port: 8080
volumes:
- name: db
+ - name: topology
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-topology-secret
parameters:
- name: HEKETI_USER_KEY
displayName: Heketi User Secret
@@ -107,9 +111,19 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
- name: IMAGE_NAME
- displayName: GlusterFS container name
+ displayName: heketi container name
required: True
- name: IMAGE_VERSION
- displayName: GlusterFS container versiona
+ displayName: heketi container version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: glusterfs
+- name: TOPOLOGY_PATH
+ displayName: heketi topology file location
required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
index c66705752..dc3d2250a 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -12,24 +12,24 @@ objects:
- kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
- name: glusterfs
+ name: glusterfs-${CLUSTER_NAME}
labels:
- glusterfs: daemonset
+ glusterfs: ${CLUSTER_NAME}-daemonset
annotations:
description: GlusterFS DaemonSet
tags: glusterfs
spec:
selector:
matchLabels:
- glusterfs-node: pod
+ glusterfs: ${CLUSTER_NAME}-pod
template:
metadata:
- name: glusterfs
+ name: glusterfs-${CLUSTER_NAME}
labels:
+ glusterfs: ${CLUSTER_NAME}-pod
glusterfs-node: pod
spec:
- nodeSelector:
- storagenode: glusterfs
+ nodeSelector: "${{NODE_LABELS}}"
hostNetwork: true
containers:
- name: glusterfs
@@ -63,26 +63,26 @@ objects:
privileged: true
readinessProbe:
timeoutSeconds: 3
- initialDelaySeconds: 100
+ initialDelaySeconds: 40
exec:
command:
- "/bin/bash"
- "-c"
- systemctl status glusterd.service
- periodSeconds: 10
+ periodSeconds: 25
successThreshold: 1
- failureThreshold: 3
+ failureThreshold: 15
livenessProbe:
timeoutSeconds: 3
- initialDelaySeconds: 100
+ initialDelaySeconds: 40
exec:
command:
- "/bin/bash"
- "-c"
- systemctl status glusterd.service
- periodSeconds: 10
+ periodSeconds: 25
successThreshold: 1
- failureThreshold: 3
+ failureThreshold: 15
resources: {}
terminationMessagePath: "/dev/termination-log"
volumes:
@@ -120,9 +120,16 @@ objects:
dnsPolicy: ClusterFirst
securityContext: {}
parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+ value: '{ "glusterfs": "storage-host" }'
- name: IMAGE_NAME
displayName: GlusterFS container name
required: True
- name: IMAGE_VERSION
displayName: GlusterFS container versiona
required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
index df045c170..1d8f1abdf 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -8,15 +8,13 @@ metadata:
annotations:
description: Heketi service deployment template
tags: glusterfs,heketi
-labels:
- template: heketi
objects:
- kind: Service
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-service
+ glusterfs: heketi-${CLUSTER_NAME}-service
annotations:
description: Exposes Heketi service
spec:
@@ -25,40 +23,40 @@ objects:
port: 8080
targetPort: 8080
selector:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
- kind: Route
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-route
+ glusterfs: heketi-${CLUSTER_NAME}-route
spec:
to:
kind: Service
- name: heketi
+ name: heketi-${CLUSTER_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-dc
+ glusterfs: heketi-${CLUSTER_NAME}-dc
annotations:
description: Defines how to deploy Heketi
spec:
replicas: 1
selector:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
triggers:
- type: ConfigChange
strategy:
type: Recreate
template:
metadata:
- name: heketi
+ name: heketi-${CLUSTER_NAME}
labels:
- glusterfs: heketi-pod
+ glusterfs: heketi-${CLUSTER_NAME}-pod
spec:
- serviceAccountName: heketi-service-account
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
- name: heketi
image: ${IMAGE_NAME}:${IMAGE_VERSION}
@@ -76,6 +74,8 @@ objects:
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
@@ -96,7 +96,7 @@ objects:
volumes:
- name: db
glusterfs:
- endpoints: heketi-storage-endpoints
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
path: heketidbstorage
parameters:
- name: HEKETI_USER_KEY
@@ -105,9 +105,16 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
- name: IMAGE_NAME
- displayName: GlusterFS container name
+ displayName: heketi container name
required: True
- name: IMAGE_VERSION
- displayName: GlusterFS container versiona
+ displayName: heketi container version
required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index fa5fa2cb0..829c1f51b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -5,12 +5,6 @@
name: "{{ glusterfs_namespace }}"
when: glusterfs_is_native or glusterfs_heketi_is_native
-- include: glusterfs_deploy.yml
- when: glusterfs_is_native
-
-- name: Make sure heketi-client is installed
- package: name=heketi-client state=present
-
- name: Delete pre-existing heketi resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
@@ -21,12 +15,18 @@
with_items:
- kind: "template,route,service,dc,jobs,secret"
selector: "deploy-heketi"
- - kind: "template,route,service,dc"
- name: "heketi"
- - kind: "svc,ep"
+ - kind: "svc"
name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ - kind: "template,route,service,dc"
+ name: "heketi-{{ glusterfs_name }}"
+ - kind: "svc"
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
- kind: "sa"
- name: "heketi-service-account"
+ name: "heketi-{{ glusterfs_name }}-service-account"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-user-secret"
failed_when: False
when: glusterfs_heketi_wipe
@@ -35,11 +35,11 @@
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=deploy-heketi-pod"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until: "heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
- name: Wait for heketi pods to terminate
@@ -47,23 +47,26 @@
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until: "heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
+- include: glusterfs_deploy.yml
+ when: glusterfs_is_native
+
- name: Create heketi service account
oc_serviceaccount:
namespace: "{{ glusterfs_namespace }}"
- name: heketi-service-account
+ name: "heketi-{{ glusterfs_name }}-service-account"
state: present
when: glusterfs_heketi_is_native
- name: Add heketi service account to privileged SCC
oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: scc
resource_name: privileged
state: present
@@ -71,7 +74,7 @@
- name: Allow heketi service account to view/edit pods
oc_adm_policy_user:
- user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: role
resource_name: edit
state: present
@@ -82,7 +85,7 @@
namespace: "{{ glusterfs_namespace }}"
state: list
kind: pod
- selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
when: glusterfs_heketi_is_native
@@ -100,7 +103,7 @@
namespace: "{{ glusterfs_namespace }}"
state: list
kind: pod
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
when: glusterfs_heketi_is_native
@@ -113,48 +116,35 @@
# heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_topology_load
+
- include: heketi_deploy_part1.yml
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_deploy_is_missing
- glusterfs_heketi_is_missing
-- name: Determine heketi URL
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- state: list
- kind: ep
- selector: "glusterfs in (deploy-heketi-service, heketi-service)"
- register: heketi_url
- until:
- - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
- delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
- when:
- - glusterfs_heketi_is_native
- - glusterfs_heketi_url is undefined
-
- name: Set heketi URL
set_fact:
- glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ glusterfs_heketi_url: "localhost:8080"
when:
- glusterfs_heketi_is_native
- - glusterfs_heketi_url is undefined
+
+- name: Set heketi-cli command
+ set_fact:
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
-- name: Generate topology file
- template:
- src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
- dest: "{{ mktemp.stdout }}/topology.json"
- when:
- - glusterfs_heketi_topology_load
-
- name: Load heketi topology
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
register: topology_load
failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
when:
@@ -164,3 +154,29 @@
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_is_missing
+
+- name: Create heketi user secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-user-secret"
+ type: "kubernetes.io/glusterfs"
+ force: True
+ contents:
+ - path: key
+ data: "{{ glusterfs_heketi_user_key }}"
+
+- name: Generate GlusterFS StorageClass file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+
+- name: Create GlusterFS StorageClass
+ oc_obj:
+ state: present
+ kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}"
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+ when:
+ - glusterfs_storageclass
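
The glusterfs-storageclass.yml.j2 template referenced above is not part of this diff. As a hedged sketch only, a StorageClass wired to heketi via the standard kubernetes.io/glusterfs provisioner typically looks something like the following; the URL, user, and secret names are illustrative placeholders and the role's actual template may differ:

# Hedged sketch, not the role's template; all values are placeholders.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-storage
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://heketi-storage-default.example.com"
  restuser: "user"
  secretNamespace: "default"
  secretName: "heketi-storage-user-secret"
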
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 451990240..aa303d126 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -3,7 +3,9 @@
glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}"
- glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}"
@@ -17,6 +19,6 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
- glusterfs_nodes: "{{ g_glusterfs_hosts }}"
+ glusterfs_nodes: "{{ groups.glusterfs }}"
- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 579112349..ea4dcc510 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -1,23 +1,24 @@
---
- assert:
- that: "glusterfs_nodeselector.keys() | count == 1"
- msg: Only one GlusterFS nodeselector key pair should be provided
-
-- assert:
that: "glusterfs_nodes | count >= 3"
msg: There must be at least three GlusterFS nodes specified
- name: Delete pre-existing GlusterFS resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
- kind: "template,daemonset"
- name: glusterfs
+ kind: "{{ item.kind }}"
+ name: "{{ item.name }}"
state: absent
+ with_items:
+ - kind: template
+ name: glusterfs
+ - kind: daemonset
+ name: "glusterfs-{{ glusterfs_name }}"
when: glusterfs_wipe
- name: Unlabel any existing GlusterFS nodes
oc_label:
- name: "{{ item }}"
+ name: "{{ hostvars[item].openshift.common.hostname }}"
kind: node
state: absent
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
@@ -40,11 +41,16 @@
failed_when: False
when: glusterfs_wipe
- # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+ # Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
- name: Clear GlusterFS storage device contents
- shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
delegate_to: "{{ item.item }}"
with_items: "{{ devices_info.results }}"
+ register: clear_devices
+ until:
+ - "'contains a filesystem in use' not in clear_devices.stderr"
+ delay: 1
+ retries: 30
when:
- glusterfs_wipe
- item.stdout_lines | count > 0
@@ -61,13 +67,11 @@
- name: Label GlusterFS nodes
oc_label:
- name: "{{ glusterfs_host }}"
+ name: "{{ hostvars[item].openshift.common.hostname }}"
kind: node
state: add
labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ glusterfs_nodes | default([]) }}"
- loop_control:
- loop_var: glusterfs_host
- name: Copy GlusterFS DaemonSet template
copy:
@@ -78,7 +82,7 @@
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: glusterfs
+ name: "glusterfs"
state: present
files:
- "{{ mktemp.stdout }}/glusterfs-template.yml"
@@ -91,17 +95,19 @@
params:
IMAGE_NAME: "{{ glusterfs_image }}"
IMAGE_VERSION: "{{ glusterfs_version }}"
+ NODE_LABELS: "{{ glusterfs_nodeselector }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Wait for GlusterFS pods
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs-node=pod"
+ selector: "glusterfs={{ glusterfs_name }}-pod"
register: glusterfs_pods
until:
- "glusterfs_pods.results.results[0]['items'] | count > 0"
# There must be as many pods with 'Ready' status True as there are nodes expecting those pods
- "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 392f4b65b..4c6891eeb 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -3,7 +3,9 @@
glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
- glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}"
+ glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+ glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}"
@@ -17,21 +19,22 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
- glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}"
+ glusterfs_nodes: "{{ groups.glusterfs_registry }}"
- include: glusterfs_common.yml
- when: g_glusterfs_registry_hosts != g_glusterfs_hosts
+ when:
+ - groups.glusterfs_registry | default([]) | count > 0
+ - "'glusterfs' not in groups or groups.glusterfs_registry != groups.glusterfs"
- name: Delete pre-existing GlusterFS registry resources
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: "{{ item.kind }}"
- name: "{{ item.name | default(omit) }}"
- selector: "{{ item.selector | default(omit) }}"
+ name: "{{ item.name }}"
state: absent
with_items:
- - kind: "svc,ep"
- name: "glusterfs-registry-endpoints"
+ - kind: "svc"
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
failed_when: False
- name: Generate GlusterFS registry endpoints
@@ -40,8 +43,8 @@
dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
- name: Copy GlusterFS registry service
- copy:
- src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml.j2"
dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
- name: Create GlusterFS registry endpoints
@@ -49,7 +52,7 @@
namespace: "{{ glusterfs_namespace }}"
state: present
kind: endpoints
- name: glusterfs-registry-endpoints
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
files:
- "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
@@ -58,14 +61,14 @@
namespace: "{{ glusterfs_namespace }}"
state: present
kind: service
- name: glusterfs-registry-endpoints
+ name: "glusterfs-{{ glusterfs_name }}-endpoints"
files:
- "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
- name: Check if GlusterFS registry volume exists
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list"
+ command: "{{ glusterfs_heketi_client }} volume list"
register: registry_volume
- name: Create GlusterFS registry volume
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index c14fcfb15..318d34b5d 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -6,11 +6,21 @@
with_items:
- "deploy-heketi-template.yml"
-- name: Create deploy-heketi resources
+- name: Create heketi topology secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+ force: True
+ files:
+ - name: topology.json
+ path: "{{ mktemp.stdout }}/topology.json"
+
+- name: Create deploy-heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: deploy-heketi
+ name: "deploy-heketi"
state: present
files:
- "{{ mktemp.stdout }}/deploy-heketi-template.yml"
@@ -25,17 +35,20 @@
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+ TOPOLOGY_PATH: "{{ mktemp.stdout }}"
- name: Wait for deploy-heketi pod
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 64410a9ab..3a9619d9d 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,8 +1,10 @@
---
- name: Create heketi DB volume
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
register: setup_storage
- failed_when: False
+
+- name: Copy heketi-storage list
+ shell: "{{ openshift.common.client_binary }} rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
@@ -28,7 +30,7 @@
# Pod's 'Complete' status must be True
- "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
failed_when:
- "'results' in heketi_job.results"
- "heketi_job.results.results | count > 0"
@@ -46,14 +48,45 @@
with_items:
- kind: "template,route,service,jobs,dc,secret"
selector: "deploy-heketi"
- failed_when: False
+ - kind: "svc"
+ name: "heketi-storage-endpoints"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-topology-secret"
+
+- name: Generate heketi endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Generate heketi service
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi-service.yml.j2"
+ dest: "{{ mktemp.stdout }}/heketi-service.yml"
+
+- name: Create heketi endpoints
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: endpoints
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Create heketi service
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: service
+ name: "heketi-db-{{ glusterfs_name }}-endpoints"
+ files:
+ - "{{ mktemp.stdout }}/heketi-service.yml"
- name: Copy heketi template
copy:
src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
dest: "{{ mktemp.stdout }}/heketi-template.yml"
-- name: Create heketi resources
+- name: Create heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
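At this point the temporary deploy-heketi objects are torn down and the permanent heketi DB Endpoints and Service are created from the files generated above. With oc_obj in state present and a files list, the effect is roughly that of feeding each file to the client, e.g. (temp directory and namespace values hypothetical):

    # oc create -f /tmp/openshift-glusterfs-ansible-AbCdEf/heketi-endpoints.yml -n glusterfs
    # oc create -f /tmp/openshift-glusterfs-ansible-AbCdEf/heketi-service.yml -n glusterfs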
@@ -72,38 +105,27 @@
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Wait for heketi pod
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
- selector: "glusterfs=heketi-pod"
+ selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
register: heketi_pod
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
-
-- name: Determine heketi URL
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- state: list
- kind: ep
- selector: "glusterfs=heketi-service"
- register: heketi_url
- until:
- - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
- delay: 10
- retries: "{{ (glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
-- name: Set heketi URL
+- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
- command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
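The set_fact above folds the transport into the command itself, so later tasks no longer need to know whether heketi runs inside the cluster. Hypothetical renderings of glusterfs_heketi_client (pod name and admin key invented):

    # native (pod-hosted heketi):
    #   oc rsh heketi-storage-1-x7k2p heketi-cli -s http://localhost:8080 --user admin --secret 'myAdminKey'
    # external heketi:
    #   heketi-cli -s http://localhost:8080 --user admin --secret 'myAdminKey'
    # the "Verify heketi service" task then appends its subcommand, e.g. "... cluster list"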
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index ebd8db453..c9bfdd1cd 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -7,12 +7,11 @@
- include: glusterfs_config.yml
when:
- - g_glusterfs_hosts | default([]) | count > 0
+ - groups.glusterfs | default([]) | count > 0
- include: glusterfs_registry.yml
when:
- - g_glusterfs_registry_hosts | default([]) | count > 0
- - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
+ - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
- name: Delete temp directory
file:
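The includes are now driven directly by the inventory groups rather than derived g_* variables. A minimal, hypothetical inventory snippet that would make the first include run (glusterfs_devices is the host variable the role typically expects):

    # [glusterfs]
    # node1.example.com glusterfs_devices='[ "/dev/sdb" ]'
    # node2.example.com glusterfs_devices='[ "/dev/sdb" ]'
    # node3.example.com glusterfs_devices='[ "/dev/sdb" ]'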
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
index 605627ab5..11c9195bb 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -1,7 +1,8 @@
+---
apiVersion: v1
kind: Endpoints
metadata:
- name: glusterfs-registry-endpoints
+ name: glusterfs-{{ glusterfs_name }}-endpoints
subsets:
- addresses:
{% for node in glusterfs_nodes %}
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
index 3f8d8f507..3f869d2b7 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2
@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
- name: glusterfs-registry-endpoints
+ name: glusterfs-{{ glusterfs_name }}-endpoints
spec:
ports:
- port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..9b8fae310
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{{ glusterfs_heketi_url }}:8081"
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-user-secret"
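The new StorageClass template enables dynamic provisioning against heketi. A hypothetical claim that would consume it, assuming glusterfs_name is "storage":

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: gluster-claim
    spec:
      storageClassName: glusterfs-storage
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 5Gi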
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
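Rendered against two hypothetical nodes with glusterfs_name set to "storage" (addresses invented), the loop expands to:

    ---
    apiVersion: v1
    kind: Endpoints
    metadata:
      name: heketi-db-storage-endpoints
    subsets:
    - addresses:
      - ip: 192.0.2.11
      - ip: 192.0.2.12
      ports:
      - port: 1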
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}