Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/defaults/main.yml | 5
-rw-r--r--  roles/docker/tasks/package_docker.yml | 14
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 2
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 20
-rw-r--r--  roles/etcd_common/defaults/main.yml | 2
-rw-r--r--  roles/etcd_common/tasks/backup.yml | 5
-rw-r--r--  roles/etcd_common/tasks/main.yml | 2
-rw-r--r--  roles/etcd_common/tasks/noop.yml | 4
-rw-r--r--  roles/lib_utils/library/repoquery.py | 18
-rw-r--r--  roles/lib_utils/library/yedit.py | 1
-rw-r--r--  roles/lib_utils/src/ansible/repoquery.py | 17
-rw-r--r--  roles/lib_utils/src/lib/import.py | 1
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 6
-rw-r--r--  roles/openshift_gcp/tasks/main.yaml | 43
-rw-r--r--  roles/openshift_gcp/templates/dns.j2.sh | 13
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 318
-rw-r--r--  roles/openshift_gcp/templates/remove.j2.sh | 156
-rw-r--r--  roles/openshift_gcp_image_prep/files/partition.conf | 3
-rw-r--r--  roles/openshift_gcp_image_prep/tasks/main.yaml | 18
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 160
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 16
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 134
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 9
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 89
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 3
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_update.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 2
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 134
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 13
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 187
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py | 18
-rw-r--r--  roles/openshift_health_checker/test/logging_index_time_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/openshift_check_test.py | 43
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/package_update_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/zz_failure_summary_test.py | 15
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 2
-rw-r--r--  roles/openshift_logging_curator/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_kibana/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_mux/vars/main.yml | 4
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 3
-rw-r--r--  roles/openshift_master/defaults/main.yml | 2
-rw-r--r--  roles/openshift_master/meta/main.yml | 16
-rw-r--r--  roles/openshift_master/tasks/main.yml | 18
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 27
-rw-r--r--  roles/openshift_master/tasks/update_etcd_client_urls.yml | 8
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 12
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 12
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 5
-rw-r--r--  roles/openshift_metrics/tasks/pre_install.yaml | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 6
-rw-r--r--  roles/openshift_node/handlers/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/config.yml | 16
-rw-r--r--  roles/openshift_node/tasks/install.yml | 4
-rw-r--r--  roles/openshift_node/tasks/main.yml | 30
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 25
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 17
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 2
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 5
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 21
-rw-r--r--  roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js | 1
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 5
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml | 143
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml | 134
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 | 36
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 | 49
-rw-r--r--  roles/openshift_version/defaults/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 6
-rw-r--r--  roles/os_firewall/tasks/iptables.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 6
102 files changed, 2015 insertions(+), 375 deletions(-)
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index ed97d539c..7e206ded1 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1 +1,6 @@
---
+docker_cli_auth_config_path: '/root/.docker'
+
+oreg_url: ''
+oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+oreg_auth_credentials_replace: False
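
For reference, the `oreg_host` expression above keeps the first path segment of `oreg_url` only when it looks like a hostname (contains a dot). A minimal Python sketch of the equivalent logic (names are illustrative, not part of the patch):

def oreg_host_from_url(oreg_url):
    """Return the registry host from an image URL, or '' when the first
    path segment does not look like a hostname."""
    first_segment = oreg_url.split('/')[0]
    return first_segment if '.' in first_segment else ''

assert oreg_host_from_url('registry.example.com/openshift3/ose') == 'registry.example.com'
assert oreg_host_from_url('openshift3/ose') == ''
assert oreg_host_from_url('') == ''
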
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index bc52ab60c..16aea5067 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -3,6 +3,8 @@
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Error out if Docker pre-installed but too old
@@ -117,6 +119,18 @@
notify:
- restart docker
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ docker_cli_auth_config_path }}/config.json"
+ when: oreg_auth_user is defined
+ register: docker_cli_auth_credentials_stat
+
+- name: Create credentials for docker cli registry auth
+ command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+
- name: Start the Docker service
systemd:
name: docker
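
The two credential tasks added above only run `docker login` when the config file is missing or replacement is explicitly requested. A minimal Python sketch of that guard, assuming a hypothetical helper and path (not the role's code):

import os

def needs_docker_login(config_path, force_replace=False):
    """Re-create CLI credentials only when config.json is absent or replacement is forced."""
    return force_replace or not os.path.exists(os.path.join(config_path, 'config.json'))

# Mirrors the stat-then-login pattern: skip `docker login` when config.json already exists.
assert needs_docker_login('/nonexistent/dir') is True
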
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 24ca0d9f8..0bab0899c 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -95,7 +95,7 @@
- name: Set to default prepend
set_fact:
l_crio_image_prepend: "docker.io/gscrivano"
- l_crio_image_name: "crio-o-fedora"
+ l_crio_image_name: "cri-o-fedora"
- name: Use Centos based image when distribution is CentOS
set_fact:
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 57a84bc2c..146e5f430 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -100,18 +100,22 @@
l_docker_image_prepend: "registry.fedoraproject.org/f25"
when: ansible_distribution == 'Fedora'
- # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- - name: Use a testing registry if requested
- set_fact:
- l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
- when:
- - openshift_docker_systemcontainer_image_registry_override is defined
- - openshift_docker_systemcontainer_image_registry_override != ""
-
- name: Set the full image name
set_fact:
l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+ # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
+ - name: Use a specific image if requested
+ set_fact:
+ l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
+ when:
+ - openshift_docker_systemcontainer_image_override is defined
+ - openshift_docker_systemcontainer_image_override != ""
+
+ # Be nice and let the user see the variable result
+ - debug:
+ var: l_docker_image
+
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
command: "atomic pull --storage ostree {{ l_docker_image }}"
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 89993f7ea..b67411f40 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -56,7 +56,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 2bc486d3f..42d27c081 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -29,7 +29,6 @@
- name: Check current etcd disk usage
shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
register: l_etcd_disk_usage
- when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -37,9 +36,9 @@
- name: Abort if insufficient disk space for etcd backup
fail:
msg: >
- {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ l_etcd_disk_usage.stdout|int*2 }} Kb disk space required for etcd backup,
{{ l_avail_disk.stdout }} Kb available.
- when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
+ when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
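
The reworked check above now requires free space of at least twice the current etcd data-dir usage before taking a backup. A worked example of the comparison, with hypothetical numbers:

# Hypothetical values, in kilobytes, mirroring the `when:` expression above.
etcd_disk_usage_kb = 1200000   # `du` of the etcd data dir
avail_disk_kb = 2000000        # free space on the backup volume

required_kb = etcd_disk_usage_kb * 2
if required_kb > avail_disk_kb:
    print("{} Kb disk space required for etcd backup, {} Kb available".format(
        required_kb, avail_disk_kb))   # the fail task would abort here
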
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
index f5bcd03ee..6ed87e6c7 100644
--- a/roles/etcd_common/tasks/main.yml
+++ b/roles/etcd_common/tasks/main.yml
@@ -6,4 +6,4 @@
- name: Include main action task file
include: "{{ r_etcd_common_action }}.yml"
- when: '"noop" not in r_etcd_common_action'
+ when: r_etcd_common_action != "noop"
diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml
new file mode 100644
index 000000000..a88d78235
--- /dev/null
+++ b/roles/etcd_common/tasks/noop.yml
@@ -0,0 +1,4 @@
+---
+# This file is here because the usage of tags, specifically `pre_upgrade`
+# breaks the functionality of this role.
+# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index 95a305b58..e5ac1f74f 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -618,17 +619,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
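
The retry loop added above re-runs the module until it succeeds or the retry budget is exhausted, sleeping between attempts. A self-contained sketch of the same pattern (the `run_once` callable and names are illustrative, not the module's API):

import time

def run_with_retries(run_once, retries=4, retry_interval=5):
    """Call run_once() until its result has no 'failed' key, or until the
    retry budget is spent; sleep retry_interval seconds between attempts."""
    tries = 1
    while True:
        rval = run_once()
        if 'failed' not in rval:
            return rval
        if tries > retries:
            raise RuntimeError(rval.get('msg', 'module failed'))
        tries += 1
        time.sleep(retry_interval)
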
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index baf72fe47..921bca074 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -35,6 +35,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
index 40773b1c1..5f5b93639 100644
--- a/roles/lib_utils/src/ansible/repoquery.py
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -19,17 +19,22 @@ def main():
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
+ retries=dict(default=4, required=False, type='int'),
+ retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
- rval = Repoquery.run_ansible(module.params, module.check_mode)
-
- if 'failed' in rval:
- module.fail_json(**rval)
-
- module.exit_json(**rval)
+ tries = 1
+ while True:
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+ if 'failed' not in rval:
+ module.exit_json(**rval)
+ elif tries > module.params['retries']:
+ module.fail_json(**rval)
+ tries += 1
+ time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index 567f8c9e0..07a04b7ae 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -10,6 +10,7 @@ import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
+import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ebfa6bb8f..517e0231d 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1602,11 +1602,13 @@ def set_builddefaults_facts(facts):
builddefaults['git_no_proxy'] = builddefaults['no_proxy']
# If we're actually defining a builddefaults config then create admission_plugin_config
# then merge builddefaults[config] structure into admission_plugin_config
+
+ # 'config' is the 'openshift_builddefaults_json' inventory variable
if 'config' in builddefaults:
if 'admission_plugin_config' not in facts['master']:
- facts['master']['admission_plugin_config'] = dict()
+ # Scaffold out the full expected datastructure
+ facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- # if the user didn't actually provide proxy values, delete the proxy env variable defaults.
delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
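
The scaffolding change above pre-creates the nested BuildDefaults structure so the later cleanup of empty env entries has a path to walk even when no admission plugin config was set before. A minimal sketch of the idea (function and variable names are illustrative, not the module's API):

def merge_builddefaults(master_facts, builddefaults_config):
    """Sketch: pre-create the nested structure, then merge the user config."""
    if 'admission_plugin_config' not in master_facts:
        # Scaffold out the full expected datastructure
        master_facts['admission_plugin_config'] = {
            'BuildDefaults': {'configuration': {'env': {}}}}
    master_facts['admission_plugin_config'].update(builddefaults_config)
    return master_facts

facts = merge_builddefaults({}, {})
# With no user-provided config, the env path still exists and is empty:
assert facts['admission_plugin_config']['BuildDefaults']['configuration']['env'] == {}
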
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yaml
new file mode 100644
index 000000000..ad205ba33
--- /dev/null
+++ b/roles/openshift_gcp/tasks/main.yaml
@@ -0,0 +1,43 @@
+#
+# This role relies on gcloud invoked via templated bash in order to
+# provide a high performance deployment option. The next logical step
+# is to transition to a deployment manager template which is then instantiated.
+# TODO: use a formal set of role parameters consistent with openshift_aws
+#
+---
+- name: Templatize DNS script
+ template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx
+- name: Templatize provision script
+ template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx
+- name: Templatize de-provision script
+ template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx
+ when:
+ - state | default('present') == 'absent'
+
+- name: Provision GCP DNS domain
+ command: /tmp/openshift_gcp_provision_dns.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ register: dns_provision
+ when:
+ - state | default('present') == 'present'
+
+- name: Ensure that DNS resolves to the hosted zone
+ assert:
+ that:
+ - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout"
+ msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'."
+ when:
+ - state | default('present') == 'present'
+
+- name: Provision GCP resources
+ command: /tmp/openshift_gcp_provision.sh
+ args:
+ chdir: "{{ playbook_dir }}/files"
+ when:
+ - state | default('present') == 'present'
+
+- name: De-provision GCP resources
+ command: /tmp/openshift_gcp_provision_remove.sh
+ when:
+ - state | default('present') == 'absent'
diff --git a/roles/openshift_gcp/templates/dns.j2.sh b/roles/openshift_gcp/templates/dns.j2.sh
new file mode 100644
index 000000000..eacf84b4d
--- /dev/null
+++ b/roles/openshift_gcp/templates/dns.j2.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -euo pipefail
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
+fi
+
+# Always output the expected nameservers as a comma delimited list
+gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','
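
This script prints the zone's nameservers as a comma-delimited list, which the `assert` task in tasks/main.yaml compares against a sorted, comma-joined dig lookup. A small Python sketch of that normalization and comparison (hostnames are hypothetical):

def normalize(ns_records):
    """Sort and comma-join nameserver names so two sources compare equal."""
    return ','.join(sorted(ns_records))

# Hypothetical values: what `dig NS` returned vs. what the managed zone reports.
resolved = ['ns-cloud-b2.googledomains.com.', 'ns-cloud-b1.googledomains.com.']
zone_reports = 'ns-cloud-b1.googledomains.com.,ns-cloud-b2.googledomains.com.'

assert normalize(resolved) == zone_reports
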
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
new file mode 100644
index 000000000..e68e9683f
--- /dev/null
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -0,0 +1,318 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Create SSH key for GCE
+if [ ! -f "{{ gce_ssh_private_key }}" ]; then
+ ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N ''
+ ssh-add "{{ gce_ssh_private_key }}" || true
+fi
+
+# Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
+pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub")
+key_tmp_file='/tmp/ocp-gce-keys'
+if ! gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then
+ if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then
+ gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
+ fi
+ echo -n 'cloud-user:' >> "$key_tmp_file"
+ cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file"
+ gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
+ rm -f "$key_tmp_file"
+fi
+
+metadata=""
+if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then
+ echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)"
+ exit 1
+ fi
+ metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}"
+fi
+if [[ -n "{{ provision_gce_user_data_file }}" ]]; then
+ if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then
+ echo "User data file missing at {{ provision_gce_user_data_file }}"
+ exit 1
+ fi
+ if [[ -n "${metadata}" ]]; then
+ metadata+=","
+ else
+ metadata="--metadata-from-file="
+ fi
+ metadata+="user-data={{ provision_gce_user_data_file }}"
+fi
+
+# Select image or image family
+image="{{ provision_gce_registered_image }}"
+if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then
+ if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then
+ echo "No compute image or image-family found, create an image named '{{ provision_gce_registered_image }}' to continue"
+ exit 1
+ fi
+ image="family/${image}"
+fi
+
+### PROVISION THE INFRASTRUCTURE ###
+
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+
+# Check that the DNS managed zone exists in Google Cloud DNS; it must be configured prior to running this script
+if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
+ exit 1
+fi
+
+# Create network
+if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto"
+else
+ echo "Network '{{ gce_network_name }}' already exists"
+fi
+
+# Firewall rules in a form:
+# ['name']='parameters for "gcloud compute firewall-rules create"'
+# For all possible parameters see: gcloud compute firewall-rules create --help
+range=""
+if [[ -n "{{ openshift_node_port_range }}" ]]; then
+ range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
+fi
+declare -A FW_RULES=(
+ ['icmp']='--allow icmp'
+ ['ssh-external']='--allow tcp:22'
+ ['ssh-internal']='--allow tcp:22 --source-tags bastion'
+ ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
+ ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
+ ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
+ ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
+ ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]}
+ else
+ echo "Firewall rule '{{ provision_prefix }}${rule}' already exists"
+ fi ) &
+done
+
+
+# Master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global
+else
+ echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists"
+fi ) &
+
+# Internal master IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists"
+fi ) &
+
+# Router IP
+( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}"
+else
+ echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists"
+fi ) &
+
+
+{% for node_group in provision_gce_node_groups %}
+# configure {{ node_group.name }}
+(
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \
+ --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \
+ --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \
+ --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
+ --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
+ --image "${image}" ${metadata}
+ else
+ echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists"
+ fi
+
+ # Create instance group
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \
+ --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
+ else
+ echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists"
+ fi
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+
+# Configure the master external LB rules
+(
+# Master health check
+if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists"
+fi
+
+gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \
+ --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}"
+
+# Master backend service
+if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}"
+ gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}"
+else
+ echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists"
+fi
+
+# Master tcp proxy target
+if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend"
+else
+ echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists"
+fi
+
+# Master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the master internal LB rules
+(
+# Internal master health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists"
+fi
+
+# Internal master target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists"
+fi
+
+# Internal master forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists"
+fi
+) &
+
+
+# Configure the infra node rules
+(
+# Router health check
+if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
+else
+ echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists"
+fi
+
+# Router target pool
+if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}"
+else
+ echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists"
+fi
+
+# Router forwarding rule
+if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool"
+else
+ echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists"
+fi
+) &
+
+for i in `jobs -p`; do wait $i; done
+
+# set the target pools
+(
+if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+else
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}"
+ gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
+fi
+) &
+
+# configure DNS
+(
+# Retry DNS changes until they succeed since this may be a shared resource
+while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f $dns
+
+ # DNS record for master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
+ fi
+
+ # DNS record for internal master lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
+ else
+ echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
+ fi
+
+ # DNS record for router lb
+ if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
+ IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
+ if [[ ! -f $dns ]]; then
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ fi
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
+ else
+ echo "DNS record for '{{ wildcard_zone }}' already exists"
+ fi
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if [[ -f $dns ]]; then
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ rc=$?
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+ continue
+ fi
+ exit $rc
+ fi
+ fi
+ break
+done
+) &
+
+# Create bucket for registry
+(
+if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+else
+ echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists"
+fi
+) &
+
+# wait until all node groups are stable
+{% for node_group in provision_gce_node_groups %}
+# wait for stable {{ node_group.name }}
+( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) &
+{% endfor %}
+
+
+for i in `jobs -p`; do wait $i; done
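
The DNS block above retries the whole record-set transaction whenever the commit fails with "HTTPError 412: Precondition not met", since the zone may be modified concurrently. A minimal Python sketch of that retry-on-precondition-conflict pattern (the `build_and_commit` callable is illustrative):

def commit_with_retry(build_and_commit):
    """Re-run the whole build-and-commit step while it fails with a
    precondition (concurrent modification) error; propagate anything else."""
    while True:
        ok, output = build_and_commit()
        if ok:
            return
        if 'HTTPError 412: Precondition not met' in output:
            continue  # someone else changed the zone; rebuild the transaction and retry
        raise RuntimeError(output)
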
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
new file mode 100644
index 000000000..41ceab2b5
--- /dev/null
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+set -euo pipefail
+
+function teardown_cmd() {
+ a=( $@ )
+ local name=$1
+ a=( "${a[@]:1}" )
+ local flag=0
+ local found=
+ for i in ${a[@]}; do
+ if [[ "$i" == "--"* ]]; then
+ found=true
+ break
+ fi
+ flag=$((flag+1))
+ done
+ if [[ -z "${found}" ]]; then
+ flag=$((flag+1))
+ fi
+ if gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
+ gcloud --project "{{ gce_project_id }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
+ fi
+}
+
+function teardown() {
+ for i in `seq 1 20`; do
+ if teardown_cmd $@; then
+ break
+ fi
+ sleep 0.5
+ done
+}
+
+# Preemptively spin down the instances
+{% for node_group in provision_gce_node_groups %}
+# scale down {{ node_group.name }}
+(
+ # performs a delete and scale down as one operation to ensure maximum parallelism
+ if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then
+ exit 0
+ fi
+ instances="${instances%?}"
+ if [[ -z "${instances}" ]]; then
+ echo "warning: No instances in {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+ if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then
+ echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
+ exit 0
+ fi
+) &
+{% endfor %}
+
+# Bucket for registry
+(
+if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
+ gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
+fi
+) &
+
+# DNS
+(
+dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
+if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
+ # Retry DNS changes until they succeed since this may be a shared resource
+ while true; do
+ dns="${TMPDIR:-/tmp}/dns.yaml"
+ rm -f "${dns}"
+
+ # export all dns records that match into a zone format, and turn each line into a set of args for
+ # record-sets transaction.
+ gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}"
+ if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
+ awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
+ then
+ rm -f "${dns}"
+ gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+ cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
+
+ # Commit all DNS changes, retrying if preconditions are not met
+ if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+ rc=$?
+ if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+ continue
+ fi
+ exit $rc
+ fi
+ fi
+ rm "${dns}.input"
+ break
+ done
+fi
+) &
+
+(
+# Router network rules
+teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+
+# Internal master network rules
+teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
+teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks
+teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
+) &
+
+(
+# Master SSL network rules
+teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
+teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies
+teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global
+teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global
+teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks
+) &
+
+#Firewall rules
+#['name']='parameters for "gcloud compute firewall-rules create"'
+#For all possible parameters see: gcloud compute firewall-rules create --help
+declare -A FW_RULES=(
+ ['icmp']=""
+ ['ssh-external']=""
+ ['ssh-internal']=""
+ ['master-internal']=""
+ ['master-external']=""
+ ['node-internal']=""
+ ['infra-node-internal']=""
+ ['infra-node-external']=""
+)
+for rule in "${!FW_RULES[@]}"; do
+ ( if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
+ # retry a few times because this call can be flaky
+ for i in `seq 1 3`; do
+ if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then
+ break
+ fi
+ done
+ fi ) &
+done
+
+for i in `jobs -p`; do wait $i; done
+
+{% for node_group in provision_gce_node_groups %}
+# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed
+(
+ teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}"
+ teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
+) &
+{% endfor %}
+
+for i in `jobs -p`; do wait $i; done
+
+# Network
+teardown "{{ gce_network_name }}" compute networks
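
teardown_cmd above splits its arguments into the gcloud sub-command words (everything before the first `--flag`) and the trailing flags, so it can slot `describe` / `delete -q` plus the resource name in between. A Python sketch of that argument split (the example arguments are hypothetical):

def split_at_first_flag(args):
    """Return (subcommand_words, flag_words); flags start at the first '--' token."""
    for i, arg in enumerate(args):
        if arg.startswith('--'):
            return args[:i], args[i:]
    return args, []

cmd, flags = split_at_first_flag(
    ['compute', 'forwarding-rules', '--region', 'us-central1'])
assert cmd == ['compute', 'forwarding-rules']
assert flags == ['--region', 'us-central1']
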
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp_image_prep/files/partition.conf
new file mode 100644
index 000000000..b87e5e0b6
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/files/partition.conf
@@ -0,0 +1,3 @@
+[Service]
+ExecStartPost=-/usr/bin/growpart /dev/sda 1
+ExecStartPost=-/sbin/xfs_growfs /
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp_image_prep/tasks/main.yaml
new file mode 100644
index 000000000..fee5ab618
--- /dev/null
+++ b/roles/openshift_gcp_image_prep/tasks/main.yaml
@@ -0,0 +1,18 @@
+---
+# GCE instances are starting with xfs AND barrier=1, which is only for extfs.
+- name: Remove barrier=1 from XFS fstab entries
+ lineinfile:
+ path: /etc/fstab
+ regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
+ line: '\1xfs\2 \4'
+ backrefs: yes
+
+- name: Ensure the root filesystem has XFS group quota turned on
+ lineinfile:
+ path: /boot/grub2/grub.cfg
+ regexp: '^(.*)linux16 (.*)$'
+ line: '\1linux16 \2 rootflags=gquota'
+ backrefs: yes
+
+- name: Ensure the root partition grows on startup
+ copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 8d35db6b5..326176273 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -3,7 +3,10 @@ Ansible action plugin to execute health checks in OpenShift clusters.
"""
import sys
import os
+import base64
import traceback
+import errno
+import json
from collections import defaultdict
from ansible.plugins.action import ActionBase
@@ -38,8 +41,13 @@ class ActionModule(ActionBase):
# storing the information we need in the result.
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
+ # if the user wants to write check results to files, they provide this directory:
+ output_dir = task_vars.get("openshift_checks_output_dir")
+ if output_dir:
+ output_dir = os.path.join(output_dir, task_vars["ansible_host"])
+
try:
- known_checks = self.load_known_checks(tmp, task_vars)
+ known_checks = self.load_known_checks(tmp, task_vars, output_dir)
args = self._task.args
requested_checks = normalize(args.get('checks', []))
@@ -65,21 +73,20 @@ class ActionModule(ActionBase):
for name in resolved_checks:
display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"]))
- check = known_checks[name]
- check_results[name] = run_check(name, check, user_disabled_checks)
- if check.changed:
- check_results[name]["changed"] = True
+ check_results[name] = run_check(name, known_checks[name], user_disabled_checks, output_dir)
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
result["failed"] = True
result["msg"] = "One or more checks failed"
+ write_result_to_output_dir(output_dir, result)
return result
- def load_known_checks(self, tmp, task_vars):
+ def load_known_checks(self, tmp, task_vars, output_dir=None):
"""Find all existing checks and return a mapping of names to instances."""
load_checks()
+ want_full_results = bool(output_dir)
known_checks = {}
for cls in OpenShiftCheck.subclasses():
@@ -90,7 +97,12 @@ class ActionModule(ActionBase):
"duplicate check name '{}' in: '{}' and '{}'"
"".format(name, full_class_name(cls), full_class_name(other_cls))
)
- known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
+ known_checks[name] = cls(
+ execute_module=self._execute_module,
+ tmp=tmp,
+ task_vars=task_vars,
+ want_full_results=want_full_results
+ )
return known_checks
@@ -185,9 +197,11 @@ def normalize(checks):
return [name.strip() for name in checks if name.strip()]
-def run_check(name, check, user_disabled_checks):
+def run_check(name, check, user_disabled_checks, output_dir=None):
"""Run a single check if enabled and return a result dict."""
- if name in user_disabled_checks:
+
+ # determine if we're going to run the check (not inactive or disabled)
+ if name in user_disabled_checks or '*' in user_disabled_checks:
return dict(skipped=True, skipped_reason="Disabled by user request")
# pylint: disable=broad-except; capturing exceptions broadly is intentional,
@@ -201,12 +215,134 @@ def run_check(name, check, user_disabled_checks):
if not is_active:
return dict(skipped=True, skipped_reason="Not active for this host")
+ # run the check
+ result = {}
try:
- return check.run()
+ result = check.run()
except OpenShiftCheckException as exc:
- return dict(failed=True, msg=str(exc))
+ check.register_failure(exc)
+ except Exception as exc:
+ check.register_failure("\n".join([str(exc), traceback.format_exc()]))
+
+ # process the check state; compose the result hash, write files as needed
+ if check.changed:
+ result["changed"] = True
+ if check.failures or result.get("failed"):
+ if "msg" in result: # failure result has msg; combine with any registered failures
+ check.register_failure(result.get("msg"))
+ result["failures"] = [(fail.name, str(fail)) for fail in check.failures]
+ result["failed"] = True
+ result["msg"] = "\n".join(str(fail) for fail in check.failures)
+ write_to_output_file(output_dir, name + ".failures.json", result["failures"])
+ if check.logs:
+ write_to_output_file(output_dir, name + ".log.json", check.logs)
+ if check.files_to_save:
+ write_files_to_save(output_dir, check)
+
+ return result
+
+
+def prepare_output_dir(dirname):
+ """Create the directory, including parents. Return bool for success/failure."""
+ try:
+ os.makedirs(dirname)
+ return True
+ except OSError as exc:
+ # trying to create existing dir leads to error;
+ # that error is fine, but for any other, assume the dir is not there
+ return exc.errno == errno.EEXIST
+
+
+def copy_remote_file_to_dir(check, file_to_save, output_dir, fname):
+ """Copy file from remote host to local file in output_dir, if given."""
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ local_file = os.path.join(output_dir, fname)
+
+ # pylint: disable=broad-except; do not need to do anything about failure to write dir/file
+ # and do not want exceptions to break anything.
+ try:
+ # NOTE: it would have been nice to copy the file directly without loading it into
+ # memory, but there does not seem to be a good way to do this via ansible.
+ result = check.execute_module("slurp", dict(src=file_to_save), register=False)
+ if result.get("failed"):
+ display.warning("Could not retrieve file {}: {}".format(file_to_save, result.get("msg")))
+ return
+
+ content = result["content"]
+ if result.get("encoding") == "base64":
+ content = base64.b64decode(content)
+ with open(local_file, "wb") as outfile:
+ outfile.write(content)
+ except Exception as exc:
+ display.warning("Failed writing remote {} to local {}: {}".format(file_to_save, local_file, exc))
+ return
+
+
+def _no_fail(obj):
+ # pylint: disable=broad-except; do not want serialization to fail for any reason
+ try:
+ return str(obj)
+ except Exception:
+ return "[not serializable]"
+
+
+def write_to_output_file(output_dir, filename, data):
+ """If output_dir provided, write data to file. Serialize as JSON if data is not a string."""
+
+ if not output_dir or not prepare_output_dir(output_dir):
+ return
+ filename = os.path.join(output_dir, filename)
+ try:
+ with open(filename, 'w') as outfile:
+ if isinstance(data, string_types):
+ outfile.write(data)
+ else:
+ json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail)
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
+ except Exception as exc:
+ display.warning("Could not write output file {}: {}".format(filename, exc))
+
+
+def write_result_to_output_dir(output_dir, result):
+ """If output_dir provided, write the result as json to result.json.
+
+ Success/failure of the write is recorded as "output_files" in the result hash afterward.
+ Otherwise this is much like write_to_output_file.
+ """
+
+ if not output_dir:
+ return
+ if not prepare_output_dir(output_dir):
+ result["output_files"] = "Error creating output directory " + output_dir
+ return
+
+ filename = os.path.join(output_dir, "result.json")
+ try:
+ with open(filename, 'w') as outfile:
+ json.dump(result, outfile, sort_keys=True, indent=4, default=_no_fail)
+ result["output_files"] = "Check results for this host written to " + filename
+ # pylint: disable=broad-except; do not want serialization/write to break for any reason
except Exception as exc:
- return dict(failed=True, msg=str(exc), exception=traceback.format_exc())
+ result["output_files"] = "Error writing check results to {}:\n{}".format(filename, exc)
+
+
+def write_files_to_save(output_dir, check):
+ """Write files to check subdir in output dir."""
+ if not output_dir:
+ return
+ output_dir = os.path.join(output_dir, check.name)
+ seen_file = defaultdict(lambda: 0)
+ for file_to_save in check.files_to_save:
+ fname = file_to_save.filename
+ while seen_file[fname]: # just to be sure we never re-write a file, append numbers as needed
+ seen_file[fname] += 1
+ fname = "{}.{}".format(fname, seen_file[fname])
+ seen_file[fname] += 1
+ if file_to_save.remote_filename:
+ copy_remote_file_to_dir(check, file_to_save.remote_filename, output_dir, fname)
+ else:
+ write_to_output_file(output_dir, fname, file_to_save.contents)
def full_class_name(cls):
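
write_files_to_save above appends a counter whenever two files a check wants saved share a name, so nothing is overwritten. A standalone sketch of that de-duplication (illustrative, not the plugin itself):

from collections import defaultdict

def unique_names(filenames):
    """Yield each requested filename, appending a counter when a name repeats."""
    seen = defaultdict(int)
    for fname in filenames:
        while seen[fname]:                  # name already used: derive a new one
            seen[fname] += 1
            fname = "{}.{}".format(fname, seen[fname])
        seen[fname] += 1
        yield fname

assert list(unique_names(['logs.json', 'logs.json', 'logs.json'])) == [
    'logs.json', 'logs.json.2', 'logs.json.3']
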
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 349655966..dcaf87eca 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -10,6 +10,7 @@ import traceback
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
from ansible.utils.color import stringc
+from ansible.module_utils.six import string_types
FAILED_NO_MSG = u'Failed without returning a message.'
@@ -140,11 +141,19 @@ def deduplicate_failures(failures):
Returns a new list of failures such that identical failures from different
hosts are grouped together in a single entry. The relative order of failures
is preserved.
+
+ If failures is unhashable, the original list of failures is returned.
"""
groups = defaultdict(list)
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
- groups[group_key].append(failure)
+ try:
+ groups[group_key].append(failure)
+ except TypeError:
+ # abort and return original list of failures when failures has an
+ # unhashable type.
+ return failures
+
result = []
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
@@ -159,7 +168,10 @@ def format_failure(failure):
"""Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
by a newline separator when output to the user."""
- host = u', '.join(failure['host'])
+ if isinstance(failure['host'], string_types):
+ host = failure['host']
+ else:
+ host = u', '.join(failure['host'])
play = failure['play']
task = failure['task']
msg = failure['msg']
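
deduplicate_failures above groups identical failures from different hosts by a tuple of their non-host fields, falling back to the original list when a value is unhashable. A compact sketch of that grouping (the sample failure dicts are hypothetical):

from collections import defaultdict

def deduplicate_failures(failures):
    """Collapse failures that differ only by host into one entry listing all hosts."""
    groups = defaultdict(list)
    order = []
    for failure in failures:
        key = tuple(sorted((k, v) for k, v in failure.items() if k != 'host'))
        try:
            if key not in groups:
                order.append(key)
            groups[key].append(failure)
        except TypeError:
            # a value (e.g. a list) is unhashable; return the list untouched
            return failures
    result = []
    for key in order:
        merged = dict(groups[key][0])
        merged['host'] = tuple(f['host'] for f in groups[key])
        result.append(merged)
    return result

same = {'play': 'p', 'task': 't', 'msg': 'boom'}
out = deduplicate_failures([dict(same, host='node1'), dict(same, host='node2')])
assert len(out) == 1 and out[0]['host'] == ('node1', 'node2')
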
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 02ee1d0f9..28cb53cc5 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -2,8 +2,11 @@
Health checks for OpenShift clusters.
"""
+import json
import operator
import os
+import time
+import collections
from abc import ABCMeta, abstractmethod, abstractproperty
from importlib import import_module
@@ -27,7 +30,7 @@ class OpenShiftCheckException(Exception):
class OpenShiftCheckExceptionList(OpenShiftCheckException):
- """A container for multiple logging errors that may be detected in one check."""
+ """A container for multiple errors that may be detected in one check."""
def __init__(self, errors):
self.errors = errors
super(OpenShiftCheckExceptionList, self).__init__(
@@ -40,26 +43,53 @@ class OpenShiftCheckExceptionList(OpenShiftCheckException):
return self.errors[index]
+FileToSave = collections.namedtuple("FileToSave", "filename contents remote_filename")
+
+
+# pylint: disable=too-many-instance-attributes; all represent significantly different state.
+# Arguably they could be separated into two hashes, one for storing parameters, and one for
+# storing result state; but that smells more like clutter than clarity.
@six.add_metaclass(ABCMeta)
class OpenShiftCheck(object):
- """
- A base class for defining checks for an OpenShift cluster environment.
+ """A base class for defining checks for an OpenShift cluster environment.
- Expect optional params: method execute_module, dict task_vars, and string tmp.
+ Optional init params: method execute_module, dict task_vars, and string tmp
execute_module is expected to have a signature compatible with _execute_module
from ansible plugins/action/__init__.py, e.g.:
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args):
This is stored so that it can be invoked in subclasses via check.execute_module("name", args)
which provides the check's stored task_vars and tmp.
+
+ Optional init param: want_full_results
+ If True and the check can gather logs, tarballs, etc., it should do so; when False,
+ skip that work, since the results would not be written to an output directory anyway.
"""
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
+ def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False):
+ # store a method for executing ansible modules from the check
self._execute_module = execute_module
+ # the task variables and tmpdir passed into the health checker task
self.task_vars = task_vars or {}
self.tmp = tmp
+ # a boolean for disabling the gathering of results (files, computations) that won't
+ # actually be recorded/used
+ self.want_full_results = want_full_results
+
+ # mainly for testing purposes; see execute_module_with_retries
+ self._module_retries = 3
+ self._module_retry_interval = 5 # seconds
+ # state to be recorded for inspection after the check runs:
+ #
# set to True when the check changes the host, for accurate total "changed" count
self.changed = False
+ # list of OpenShiftCheckException for check to report (alternative to returning a failed result)
+ self.failures = []
+ # list of FileToSave - files the check specifies to be written locally if so configured
+ self.files_to_save = []
+ # log messages for the check - tuples of (description, msg) where msg is serializable.
+ # These are intended to be a sequential record of what the check observed and determined.
+ self.logs = []
@abstractproperty
def name(self):
@@ -82,7 +112,13 @@ class OpenShiftCheck(object):
@abstractmethod
def run(self):
- """Executes a check, normally implemented as a module."""
+ """Executes a check against a host and returns a result hash similar to Ansible modules.
+
+ The direction going forward is to record state in the check's attributes rather
+ than build a result hash: return an empty hash and let the action plugin fill it
+ in, or raise an OpenShiftCheckException. Returning a hash may be deprecated if
+ it does not prove necessary.
+ """
return {}
@classmethod
@@ -94,7 +130,43 @@ class OpenShiftCheck(object):
for subclass in subclass.subclasses():
yield subclass
- def execute_module(self, module_name=None, module_args=None):
+ def register_failure(self, error):
+ """Record in the check that a failure occurred.
+
+ Recorded failures are merged into the result hash for now. They are also saved to the output
+ directory (if provided) as <check>.failures.json and recorded as a log entry for context in <check>.log.json.
+ """
+ # It should be an exception; make it one if not
+ if not isinstance(error, OpenShiftCheckException):
+ error = OpenShiftCheckException(str(error))
+ self.failures.append(error)
+ # duplicate it in the logs so it can be seen in the context of any
+ # information that led to the failure
+ self.register_log("failure: " + error.name, str(error))
+
+ def register_log(self, context, msg):
+ """Record an entry for the check log.
+
+ Log entries are intended to provide context for the whole sequence of what the check observed.
+ They are saved as an ordered list in a local check log file.
+ They are not included in the result or in the ansible log; they exist just for the record.
+ """
+ self.logs.append([context, msg])
+
+ def register_file(self, filename, contents=None, remote_filename=""):
+ """Record a file that a check makes available to be saved individually to output directory.
+
+ Either file contents should be passed in, or a file to be copied from the remote host
+ should be specified. Contents that are not a string will be serialized as JSON.
+
+ NOTE: When copying a file from remote host, it is slurped into memory as base64, meaning
+ you should avoid using this on huge files (more than say 10M).
+ """
+ if contents is None and not remote_filename:
+ raise OpenShiftCheckException("File data/source not specified; this is a bug in the check.")
+ self.files_to_save.append(FileToSave(filename, contents, remote_filename))
+
+ def execute_module(self, module_name=None, module_args=None, save_as_name=None, register=True):
"""Invoke an Ansible module from a check.
Invoke stored _execute_module, normally copied from the action
@@ -106,6 +178,12 @@ class OpenShiftCheck(object):
Ansible version).
So e.g. check.execute_module("foo", dict(arg1=...))
+
+ save_as_name specifies a file name for saving the result to an output directory,
+ if needed, and is intended to uniquely identify the result of invoking execute_module.
+ If not provided, the module name will be used.
+ If register is set to False, the result will not be registered in the logs or files to save.
+
Return: result hash from module execution.
"""
if self._execute_module is None:
@@ -113,7 +191,33 @@ class OpenShiftCheck(object):
self.__class__.__name__ +
" invoked execute_module without providing the method at initialization."
)
- return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ result = self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ if result.get("changed"):
+ self.changed = True
+ for output in ["result", "stdout"]:
+ # output is often JSON; attempt to decode
+ try:
+ result[output + "_json"] = json.loads(result[output])
+ except (KeyError, ValueError):
+ pass
+
+ if register:
+ self.register_log("execute_module: " + module_name, result)
+ self.register_file(save_as_name or module_name + ".json", result)
+ return result
+
+ def execute_module_with_retries(self, module_name, module_args):
+ """Run execute_module and retry on failure."""
+ result = {}
+ tries = 0
+ while True:
+ res = self.execute_module(module_name, module_args)
+ if tries > self._module_retries or not res.get("failed"):
+ result.update(res)
+ return result
+ result["last_failed"] = res
+ tries += 1
+ time.sleep(self._module_retry_interval)
def get_var(self, *keys, **kwargs):
"""Get deeply nested values from task_vars.
@@ -171,8 +275,12 @@ class OpenShiftCheck(object):
'There is a bug in this check. While trying to convert variable \n'
' "{var}={value}"\n'
'the given converter cannot be used or failed unexpectedly:\n'
- '{error}'.format(var=".".join(keys), value=value, error=error)
- )
+ '{type}: {error}'.format(
+ var=".".join(keys),
+ value=value,
+ type=error.__class__.__name__,
+ error=error
+ ))
@staticmethod
def get_major_minor_version(openshift_image_tag):
@@ -214,7 +322,9 @@ class OpenShiftCheck(object):
mount_point = os.path.dirname(mount_point)
try:
- return mount_for_path[mount_point]
+ mount = mount_for_path[mount_point]
+ self.register_log("mount point for " + path, mount)
+ return mount
except KeyError:
known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path))
raise OpenShiftCheckException(
@@ -242,7 +352,7 @@ def load_checks(path=None, subpkg=""):
modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
continue
- if name.endswith(".py") and not name.startswith(".") and name not in LOADER_EXCLUDES:
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
return modules
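
To illustrate how a check might use the new bookkeeping API, here is a hypothetical check (not one added by this patch; it assumes the role's openshift_checks package is importable):

    from openshift_checks import OpenShiftCheck, OpenShiftCheckException

    class EtcdBackupSpace(OpenShiftCheck):
        """Hypothetical check demonstrating register_log/register_file/register_failure."""

        name = "etcd_backup_space"
        tags = ["etcd"]

        def run(self):
            # execute_module_with_retries retries transient module failures and,
            # by default, records the result in self.logs and self.files_to_save
            result = self.execute_module_with_retries(
                "command", {"_raw_params": "df -h /var/lib/etcd"})
            self.register_log("df output", result.get("stdout", ""))

            if self.want_full_results:
                # only gather potentially large artifacts when an output dir is configured
                self.register_file("etcd-df.txt", contents=result.get("stdout", ""))

            if result.get("rc", 0) != 0:
                # recorded failures are merged into the final result by the action plugin
                self.register_failure(OpenShiftCheckException("could not inspect etcd storage"))
            return {}
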
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index f302fd14b..cdf56e959 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -70,6 +70,10 @@ class DiskAvailability(OpenShiftCheck):
# If it is not a number, then it should be a nested dict.
pass
+ self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
+ if user_config:
+ self.register_log("user-configured thresholds", user_config)
+
# TODO: as suggested in
# https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
# maybe we could support checking disk availability in paths that are
@@ -113,10 +117,7 @@ class DiskAvailability(OpenShiftCheck):
'in your Ansible inventory, and lower the recommended disk space availability\n'
'if necessary for this upgrade.').format(config_bytes)
- return {
- 'failed': True,
- 'msg': msg,
- }
+ self.register_failure(msg)
return {}
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 866c74d7c..9c35f0f92 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -32,7 +32,12 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# we use python-docker-py to check local docker for images, and skopeo
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
- skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false"
+ skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
+
+ def __init__(self, *args, **kwargs):
+ super(DockerImageAvailability, self).__init__(*args, **kwargs)
+ # record whether we could reach a registry or not (and remember results)
+ self.reachable_registries = {}
def is_active(self):
"""Skip hosts with unsupported deployment types."""
@@ -64,15 +69,21 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
unavailable_images = set(missing_images) - set(available_images)
if unavailable_images:
- return {
- "failed": True,
- "msg": (
- "One or more required Docker images are not available:\n {}\n"
- "Configured registries: {}\n"
- "Checked by: {}"
- ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries),
- self.skopeo_img_check_command),
- }
+ registries = [
+ reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
+ for reg in registries
+ ]
+ msg = (
+ "One or more required Docker images are not available:\n {}\n"
+ "Configured registries: {}\n"
+ "Checked by: {}"
+ ).format(
+ ",\n ".join(sorted(unavailable_images)),
+ ", ".join(registries),
+ self.skopeo_img_check_command
+ )
+
+ return dict(failed=True, msg=msg)
return {}
@@ -128,31 +139,31 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def local_images(self, images):
"""Filter a list of images and return those available locally."""
- return [
- image for image in images
- if self.is_image_local(image)
- ]
+ registries = self.known_docker_registries()
+ found_images = []
+ for image in images:
+ # docker could have the image name as-is or prefixed with any registry
+ imglist = [image] + [reg + "/" + image for reg in registries]
+ if self.is_image_local(imglist):
+ found_images.append(image)
+ return found_images
def is_image_local(self, image):
"""Check if image is already in local docker index."""
result = self.execute_module("docker_image_facts", {"name": image})
- if result.get("failed", False):
- return False
-
- return bool(result.get("images", []))
+ return bool(result.get("images")) and not result.get("failed")
def known_docker_registries(self):
"""Build a list of docker registries available according to inventory vars."""
- docker_facts = self.get_var("openshift", "docker")
- regs = set(docker_facts["additional_registries"])
+ regs = list(self.get_var("openshift.docker.additional_registries", default=[]))
deployment_type = self.get_var("openshift_deployment_type")
- if deployment_type == "origin":
- regs.update(["docker.io"])
- elif "enterprise" in deployment_type:
- regs.update(["registry.access.redhat.com"])
+ if deployment_type == "origin" and "docker.io" not in regs:
+ regs.append("docker.io")
+ elif "enterprise" in deployment_type and "registry.access.redhat.com" not in regs:
+ regs.append("registry.access.redhat.com")
- return list(regs)
+ return regs
def available_images(self, images, default_registries):
"""Search remotely for images. Returns: list of images found."""
@@ -165,17 +176,35 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Use Skopeo to determine if required image exists in known registry(s)."""
registries = default_registries
- # if image already includes a registry, only use that
+ # If image already includes a registry, only use that.
+ # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
+ # registry.access.redhat.com/rhel7 as if the registry were a namespace.
+ # It's not clear that there's any way to distinguish them, but fortunately
+ # the current set of images all look like [registry/]namespace/name[:version].
if image.count("/") > 1:
registry, image = image.split("/", 1)
registries = [registry]
for registry in registries:
- args = {
- "_raw_params": self.skopeo_img_check_command + " docker://{}/{}".format(registry, image)
- }
- result = self.execute_module("command", args)
+ if registry not in self.reachable_registries:
+ self.reachable_registries[registry] = self.connect_to_registry(registry)
+ if not self.reachable_registries[registry]:
+ continue
+
+ args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
+ result = self.execute_module_with_retries("command", args)
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
+ if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
+ self.reachable_registries[registry] = False
return False
+
+ def connect_to_registry(self, registry):
+ """Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+ # test a simple TCP connection
+ host, _, port = registry.partition(":")
+ port = port or 443
+ args = dict(host=host, port=port, state="started", timeout=30)
+ result = self.execute_module("wait_for", args)
+ return result.get("rc", 0) == 0 and not result.get("failed")
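
For context, the reworked skopeo invocation is plain string templating, and images that already carry a registry are split off before the configured registries are consulted. A sketch with illustrative values:

    skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"

    image, registries = "eggs.reg/spam/eggs:v1", ["registry.access.redhat.com", "docker.io"]
    if image.count("/") > 1:
        # more than one "/" means the first component is treated as the registry
        registry, image = image.split("/", 1)
        registries = [registry]

    print(skopeo_img_check_command.format(registry=registries[0], image=image))
    # timeout 10 skopeo inspect --tls-verify=false docker://eggs.reg/spam/eggs:v1
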
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 7fc843fd7..986a01f38 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -72,7 +72,7 @@ class Elasticsearch(LoggingCheck):
for pod_name in pods_by_name.keys():
# Compare what each ES node reports as master and compare for split brain
get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
- master_name_str = self.exec_oc(get_master_cmd, [])
+ master_name_str = self.exec_oc(get_master_cmd, [], save_as_name="get_master_names.json")
master_names = (master_name_str or '').split(' ')
if len(master_names) > 1:
es_master_names.add(master_names[1])
@@ -113,7 +113,7 @@ class Elasticsearch(LoggingCheck):
# get ES cluster nodes
node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
- cluster_node_data = self.exec_oc(node_cmd, [])
+ cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json")
try:
cluster_nodes = json.loads(cluster_node_data)['nodes']
except (ValueError, KeyError):
@@ -142,7 +142,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
- cluster_health_data = self.exec_oc(cluster_health_cmd, [])
+ cluster_health_data = self.exec_oc(cluster_health_cmd, [], save_as_name='get_es_health.json')
try:
health_res = json.loads(cluster_health_data)
if not health_res or not health_res.get('status'):
@@ -171,7 +171,7 @@ class Elasticsearch(LoggingCheck):
errors = []
for pod_name in pods_by_name.keys():
df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
- disk_output = self.exec_oc(df_cmd, [])
+ disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index ecd8adb64..06bdfebf6 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -78,7 +78,7 @@ class LoggingCheck(OpenShiftCheck):
"""Returns the namespace in which logging is configured to deploy."""
return self.get_var("openshift_logging_namespace", default="logging")
- def exec_oc(self, cmd_str="", extra_args=None):
+ def exec_oc(self, cmd_str="", extra_args=None, save_as_name=None):
"""
Execute an 'oc' command in the remote host.
Returns: output of command and namespace,
@@ -92,7 +92,7 @@ class LoggingCheck(OpenShiftCheck):
"extra_args": list(extra_args) if extra_args else [],
}
- result = self.execute_module("ocutil", args)
+ result = self.execute_module("ocutil", args, save_as_name=save_as_name)
if result.get("failed"):
if result['result'] == '[Errno 2] No such file or directory':
raise CouldNotUseOc(
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
index d781db649..cacdf4213 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
@@ -104,7 +104,7 @@ class LoggingIndexTime(LoggingCheck):
"https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
)
exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace(), uuid=uuid)
- result = self.exec_oc(exec_cmd, [])
+ result = self.exec_oc(exec_cmd, [], save_as_name="query_for_uuid.json")
try:
count = json.loads(result)["count"]
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index e9bae60a3..b90ebf6dd 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -36,7 +36,7 @@ class DockerHostMixin(object):
# NOTE: we would use the "package" module but it's actually an action plugin
# and it's not clear how to invoke one of those. This is about the same anyway:
- result = self.execute_module(
+ result = self.execute_module_with_retries(
self.get_var("ansible_pkg_mgr", default="yum"),
{"name": self.dependencies, "state": "present"},
)
@@ -49,5 +49,4 @@ class DockerHostMixin(object):
" {deps}\n{msg}"
).format(deps=',\n '.join(self.dependencies), msg=msg)
failed = result.get("failed", False) or result.get("rc", 0) != 0
- self.changed = result.get("changed", False)
return msg, failed
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index a86180b00..21355c2f0 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -26,7 +26,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
@staticmethod
def master_packages(rpm_prefix):
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index 1e9aecbe0..8464e8a5e 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
def run(self):
args = {"packages": []}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 0b795b6c4..d4aec3ed8 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -76,7 +76,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
],
}
- return self.execute_module("aos_version", args)
+ return self.execute_module_with_retries("aos_version", args)
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version(s) for the current OpenShift version."""
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index c109ebd24..f14887303 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -3,10 +3,12 @@ import pytest
from ansible.playbook.play_context import PlayContext
from openshift_health_check import ActionModule, resolve_checks
-from openshift_checks import OpenShiftCheckException
+from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
+from openshift_checks import OpenShiftCheckException, FileToSave
-def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False):
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
+ run_logs=None, run_files=None, changed=False, get_var_return=None):
"""Returns a new class that is compatible with OpenShiftCheck for testing."""
_name, _tags = name, tags
@@ -14,12 +16,16 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
class FakeCheck(object):
name = _name
tags = _tags or []
- changed = False
- def __init__(self, execute_module=None, task_vars=None, tmp=None):
- pass
+ def __init__(self, **_):
+ self.changed = False
+ self.failures = []
+ self.logs = run_logs or []
+ self.files_to_save = run_files or []
def is_active(self):
+ if isinstance(is_active, Exception):
+ raise is_active
return is_active
def run(self):
@@ -28,6 +34,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
raise run_exception
return run_return
+ def get_var(*args, **_):
+ return get_var_return
+
+ def register_failure(self, exc):
+ self.failures.append(OpenShiftCheckException(str(exc)))
+ return
+
return FakeCheck
@@ -98,23 +111,33 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,
assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
-def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
- checks = [fake_check(is_active=False)]
+@pytest.mark.parametrize('is_active, skipped_reason', [
+ (False, "Not active for this host"),
+ (Exception("borked"), "exception"),
+])
+def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=is_active)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
- assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert result['checks']['fake_check'].get('skipped')
+ assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
assert not failed(result)
assert not changed(result)
assert not skipped(result)
-def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('to_disable', [
+ 'fake_check',
+ ['fake_check', 'spam'],
+ '*,spam,eggs',
+])
+def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
checks = [fake_check('fake_check', is_active=True)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
- task_vars['openshift_disable_check'] = 'fake_check'
+ task_vars['openshift_disable_check'] = to_disable
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
@@ -123,10 +146,21 @@ def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
assert not skipped(result)
+def test_action_plugin_run_list_checks(monkeypatch):
+ task = FakeTask('openshift_health_check', {'checks': []})
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
+ result = plugin.run()
+
+ assert failed(result, msg_has="Available checks")
+ assert not changed(result)
+ assert not skipped(result)
+
+
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
- check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ check_class = fake_check(run_return=check_return_value, run_files=[None])
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -140,7 +174,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
check_class = fake_check(run_return=check_return_value, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -153,9 +187,9 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
- check_return_value = {'failed': True}
+ check_return_value = {'failed': True, 'msg': 'this is a failure'}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -166,24 +200,51 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
assert not skipped(result)
-def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('exc_class, expect_traceback', [
+ (OpenShiftCheckException, False),
+ (Exception, True),
+])
+def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
exception_msg = 'fake check has an exception'
- run_exception = OpenShiftCheckException(exception_msg)
+ run_exception = exc_class(exception_msg)
check_class = fake_check(run_exception=run_exception, changed=True)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
assert failed(result, msg_has=['failed'])
assert changed(result['checks']['fake_check'])
assert changed(result)
assert not skipped(result)
+def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
+ check_class = fake_check(
+ run_return={},
+ run_logs=[('thing', 'note')],
+ run_files=[
+ FileToSave('save.file', 'contents', None),
+ FileToSave('save.file', 'duplicate', None),
+ FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception
+ ],
+ )
+ task_vars['openshift_checks_output_dir'] = str(tmpdir)
+ check_class.get_var = lambda self, name, **_: task_vars.get(name)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ plugin.run(tmp=None, task_vars=task_vars)
+ assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
+ assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
+
+
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
- monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -249,3 +310,38 @@ def test_resolve_checks_failure(names, all_checks, words_in_exception):
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('give_output_dir, result, expect_file', [
+ (False, None, False),
+ (True, dict(content="c3BhbQo=", encoding="base64"), True),
+ (True, dict(content="encoding error", encoding="base64"), False),
+ (True, dict(content="spam", no_encoding=None), True),
+ (True, dict(failed=True, msg="could not slurp"), False),
+])
+def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
+ check = fake_check()()
+ check.execute_module = lambda *args, **_: result
+ copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
+ assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
+
+
+def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):
+
+ class Spam(object):
+ def __str__(self):
+ raise Exception("break str")
+
+ test = {1: object(), 2: Spam()}
+ test[3] = test
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error writing" in test["output_files"]
+
+ output_dir = tmpdir.join("eggs")
+ output_dir.write("spam") # so now it's not a dir
+ write_to_output_file(str(output_dir), "somefile", "somedata")
+ assert "Could not write" in capsys.readouterr()[1]
+
+ monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error creating" in test["output_files"]
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index f4fd2dfed..9ae679b79 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -183,11 +183,12 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
ansible_mounts=ansible_mounts,
)
- result = DiskAvailability(fake_execute_module, task_vars).run()
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert result['failed']
+ assert check.failures
for chunk in 'below recommended'.split() + expect_chunks:
- assert chunk in result.get('msg', '')
+ assert chunk in str(check.failures[0])
@pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
@@ -237,11 +238,11 @@ def test_min_required_space_changes_with_upgrade_context(name, group_names, cont
)
check = DiskAvailability(fake_execute_module, task_vars)
- result = check.run()
+ check.run()
- assert result.get("failed", False) == failed
+ assert bool(check.failures) == failed
for word in extra_words:
- assert word in result.get('msg', '')
+ assert word in str(check.failures[0])
def fake_execute_module(*args):
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 8d0a53df9..6a7c16c7e 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,6 +3,23 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(),
+ ),
+ openshift_deployment_type='origin',
+ openshift_image_tag='',
+ group_names=['nodes', 'masters'],
+ )
+
+
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
("origin", True, [], True),
("openshift-enterprise", True, [], True),
@@ -15,12 +32,10 @@ from openshift_checks.docker_image_availability import DockerImageAvailability
("origin", False, ["nodes", "masters"], True),
("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_containerized, group_names, expect_active):
- task_vars = dict(
- openshift=dict(common=dict(is_containerized=is_containerized)),
- openshift_deployment_type=deployment_type,
- group_names=group_names,
- )
+def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+ task_vars['openshift_deployment_type'] = deployment_type
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['group_names'] = group_names
assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@@ -30,10 +45,10 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
(True, False),
(False, True),
])
-def test_all_images_available_locally(is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
- return {"changed": True}
+ return {}
assert module_name == "docker_image_facts"
assert 'name' in module_args
@@ -42,19 +57,9 @@ def test_all_images_available_locally(is_containerized, is_atomic):
'images': [module_args['name']],
}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=is_containerized,
- is_atomic=is_atomic,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['openshift']['common']['is_atomic'] = is_atomic
+ result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -63,53 +68,36 @@ def test_all_images_available_locally(is_containerized, is_atomic):
False,
True,
])
-def test_all_images_available_remotely(available_locally):
+def test_all_images_available_remotely(task_vars, available_locally):
def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
- return {'changed': False}
+ return {}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='v3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"]
+ task_vars['openshift_image_tag'] = 'v3.4'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ result = check.run()
assert not result.get('failed', False)
-def test_all_images_unavailable():
- def execute_module(module_name=None, *_):
- if module_name == "command":
- return {
- 'failed': True,
- }
+def test_all_images_unavailable(task_vars):
+ def execute_module(module_name=None, *args):
+ if module_name == "wait_for":
+ return {}
+ elif module_name == "command":
+ return {'failed': True}
- return {
- 'changed': False,
- }
+ return {} # docker_image_facts failure
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='latest',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["docker.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ task_vars['openshift_image_tag'] = 'latest'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual['failed']
assert "required Docker images are not available" in actual['msg']
@@ -125,62 +113,63 @@ def test_all_images_unavailable():
["dependencies can be installed via `yum`"]
),
])
-def test_skopeo_update_failure(message, extra_words):
+def test_skopeo_update_failure(task_vars, message, extra_words):
def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
"msg": message,
- "changed": False,
}
- return {'changed': False}
+ return {}
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["unknown.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual["failed"]
for word in extra_words:
assert word in actual["msg"]
-@pytest.mark.parametrize("deployment_type,registries", [
- ("origin", ["unknown.io"]),
- ("openshift-enterprise", ["registry.access.redhat.com"]),
- ("openshift-enterprise", []),
-])
-def test_registry_availability(deployment_type, registries):
+@pytest.mark.parametrize(
+ "image, registries, connection_test_failed, skopeo_failed, "
+ "expect_success, expect_registries_reached", [
+ (
+ "spam/eggs:v1", ["test.reg"],
+ True, True,
+ False,
+ {"test.reg": False},
+ ),
+ (
+ "spam/eggs:v1", ["test.reg"],
+ False, True,
+ False,
+ {"test.reg": True},
+ ),
+ (
+ "eggs.reg/spam/eggs:v1", ["test.reg"],
+ False, False,
+ True,
+ {"eggs.reg": True},
+ ),
+ ])
+def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
+ expect_success, expect_registries_reached):
def execute_module(module_name=None, *_):
- return {
- 'changed': False,
- }
+ if module_name == "wait_for":
+ return dict(msg="msg", failed=connection_test_failed)
+ elif module_name == "command":
+ return dict(msg="msg", failed=skopeo_failed)
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=registries),
- ),
- openshift_deployment_type=deployment_type,
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ check = DockerImageAvailability(execute_module, task_vars())
+ check._module_retry_interval = 0
- assert not actual.get("failed", False)
+ available = check.is_available_skopeo_image(image, registries)
+ assert available == expect_success
+ assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
@@ -257,7 +246,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
openshift_image_tag='vtest',
)
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
def test_containerized_etcd():
@@ -271,4 +260,4 @@ def test_containerized_etcd():
group_names=['etcd'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
index 09bacd9ac..3fa5e8929 100644
--- a/roles/openshift_health_checker/test/elasticsearch_test.py
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -72,7 +72,7 @@ def test_check_elasticsearch():
assert_error_in_list('NoRunningPods', excinfo.value)
# canned oc responses to match so all the checks pass
- def exec_oc(cmd, args):
+ def exec_oc(cmd, args, **_):
if '_cat/master' in cmd:
return 'name logging-es'
elif '/_nodes' in cmd:
@@ -97,7 +97,7 @@ def test_check_running_es_pods():
def test_check_elasticsearch_masters():
pods = [plain_es_pod]
- check = canned_elasticsearch(task_vars_config_base, lambda *_: plain_es_pod['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: plain_es_pod['_test_master_name_str'])
assert not check.check_elasticsearch_masters(pods_by_name(pods))
@@ -117,7 +117,7 @@ def test_check_elasticsearch_masters():
])
def test_check_elasticsearch_masters_error(pods, expect_error):
test_pods = list(pods)
- check = canned_elasticsearch(task_vars_config_base, lambda *_: test_pods.pop(0)['_test_master_name_str'])
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: test_pods.pop(0)['_test_master_name_str'])
assert_error_in_list(expect_error, check.check_elasticsearch_masters(pods_by_name(pods)))
@@ -129,7 +129,7 @@ es_node_list = {
def test_check_elasticsearch_node_list():
- check = canned_elasticsearch(task_vars_config_base, lambda *_: json.dumps(es_node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: json.dumps(es_node_list))
assert not check.check_elasticsearch_node_list(pods_by_name([plain_es_pod]))
@@ -151,13 +151,13 @@ def test_check_elasticsearch_node_list():
),
])
def test_check_elasticsearch_node_list_errors(pods, node_list, expect_error):
- check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args, **_: json.dumps(node_list))
assert_error_in_list(expect_error, check.check_elasticsearch_node_list(pods_by_name(pods)))
def test_check_elasticsearch_cluster_health():
test_health_data = [{"status": "green"}]
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert not check.check_es_cluster_health(pods_by_name([plain_es_pod]))
@@ -175,12 +175,12 @@ def test_check_elasticsearch_cluster_health():
])
def test_check_elasticsearch_cluster_health_errors(pods, health_data, expect_error):
test_health_data = list(health_data)
- check = canned_elasticsearch(exec_oc=lambda *_: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
assert_error_in_list(expect_error, check.check_es_cluster_health(pods_by_name(pods)))
def test_check_elasticsearch_diskspace():
- check = canned_elasticsearch(exec_oc=lambda *_: 'IUse% Use%\n 3% 4%\n')
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: 'IUse% Use%\n 3% 4%\n')
assert not check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
@@ -199,5 +199,5 @@ def test_check_elasticsearch_diskspace():
),
])
def test_check_elasticsearch_diskspace_errors(disk_data, expect_error):
- check = canned_elasticsearch(exec_oc=lambda *_: disk_data)
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: disk_data)
assert_error_in_list(expect_error, check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod])))
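
The stub lambdas in these tests change from "lambda *_" to "lambda *args, **_" because exec_oc is now called with save_as_name as a keyword argument; a positional-only lambda would raise a TypeError. A short illustration:

    positional_only = lambda *_: "name logging-es"
    accepts_kwargs = lambda *args, **_: "name logging-es"

    accepts_kwargs("cmd", [], save_as_name="get_master_names.json")   # fine
    try:
        positional_only("cmd", [], save_as_name="get_master_names.json")
    except TypeError as exc:
        print(exc)  # <lambda>() got an unexpected keyword argument 'save_as_name'
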
diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py
index 22566b295..c48ade9b8 100644
--- a/roles/openshift_health_checker/test/logging_index_time_test.py
+++ b/roles/openshift_health_checker/test/logging_index_time_test.py
@@ -102,7 +102,7 @@ def test_with_running_pods():
),
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
@@ -131,7 +131,7 @@ def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
)
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
with pytest.raises(OpenShiftCheckException) as error:
check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)
@@ -139,7 +139,7 @@ def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
def test_curl_kibana_with_uuid():
- check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404}))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps({"statusCode": 404}))
check.generate_uuid = lambda: SAMPLE_UUID
assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod)
@@ -161,7 +161,7 @@ def test_curl_kibana_with_uuid():
),
], ids=lambda argval: argval[0])
def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
- check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
check.generate_uuid = lambda: SAMPLE_UUID
with pytest.raises(OpenShiftCheckException) as error:
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
index 789784c77..bc0c3b26c 100644
--- a/roles/openshift_health_checker/test/openshift_check_test.py
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -106,13 +106,40 @@ def test_get_var_convert(task_vars, keys, convert, expected):
assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected
-@pytest.mark.parametrize("keys, convert", [
- (("bar", "baz"), int),
- (("bar.baz"), float),
- (("foo"), "bogus"),
- (("foo"), lambda a, b: 1),
- (("foo"), lambda a: 1 / 0),
+def convert_oscexc(_):
+ raise OpenShiftCheckException("known failure")
+
+
+def convert_exc(_):
+ raise Exception("failure unknown")
+
+
+@pytest.mark.parametrize("keys, convert, expect_text", [
+ (("bar", "baz"), int, "Cannot convert"),
+ (("bar.baz",), float, "Cannot convert"),
+ (("foo",), "bogus", "TypeError"),
+ (("foo",), lambda a, b: 1, "TypeError"),
+ (("foo",), lambda a: 1 / 0, "ZeroDivisionError"),
+ (("foo",), convert_oscexc, "known failure"),
+ (("foo",), convert_exc, "failure unknown"),
])
-def test_get_var_convert_error(task_vars, keys, convert):
- with pytest.raises(OpenShiftCheckException):
+def test_get_var_convert_error(task_vars, keys, convert, expect_text):
+ with pytest.raises(OpenShiftCheckException) as excinfo:
dummy_check(task_vars).get_var(*keys, convert=convert)
+ assert expect_text in str(excinfo.value)
+
+
+def test_register(task_vars):
+ check = dummy_check(task_vars)
+
+ check.register_failure(OpenShiftCheckException("spam"))
+ assert "spam" in str(check.failures[0])
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.register_file("spam") # no file contents specified
+ assert "not specified" in str(excinfo.value)
+
+ # normally execute_module registers the result file; test disabling that
+ check._execute_module = lambda *args, **_: dict()
+ check.execute_module("eggs", module_args={}, register=False)
+ assert not check.files_to_save
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index e1bf29d2a..602f32989 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -50,7 +50,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
)
- return_value = object()
+ return_value = {} # note: check.execute_module modifies return hash contents
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'rpm_version'
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 1fe648b75..b34e8fbfc 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -49,14 +49,14 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
])
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
assert set(module_args['packages']).issuperset(must_have_packages)
assert not set(module_args['packages']).intersection(must_not_have_packages)
- return return_value
+ return {'foo': return_value}
result = PackageAvailability(execute_module, task_vars).run()
- assert result is return_value
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 06489b0d7..85d3c9cab 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -2,14 +2,14 @@ from openshift_checks.package_update import PackageUpdate
def test_package_update():
- return_value = object()
+ return_value = {}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
# empty list of packages means "generic check if 'yum update' will work"
assert module_args['packages'] == []
- return return_value
+ return {'foo': return_value}
result = PackageUpdate(execute_module).run()
- assert result is return_value
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index e871f39f0..8564cd4db 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -52,7 +52,7 @@ def test_invalid_openshift_release_format():
])
def test_package_version(openshift_release):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
assert module_name == 'aos_version'
@@ -66,7 +66,7 @@ def test_package_version(openshift_release):
check = PackageVersion(execute_module, task_vars_for(openshift_release, 'origin'))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
@@ -79,7 +79,7 @@ def test_package_version(openshift_release):
])
def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'aos_version'
@@ -93,7 +93,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
check = PackageVersion(execute_module, task_vars_for(openshift_release, deployment_type))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py
index 0fc258133..69f27653c 100644
--- a/roles/openshift_health_checker/test/zz_failure_summary_test.py
+++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py
@@ -65,6 +65,21 @@ import pytest
},
],
),
+ # if a failure contains an unhashable value, it will not be deduplicated
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ ),
])
def test_deduplicate_failures(failures, deduplicated):
assert deduplicate_failures(failures) == deduplicated
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 08c1d849e..712a2a591 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -5,8 +5,8 @@ r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default
r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-openshift_hosted_router_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
-openshift_hosted_registry_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}"
+openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
+openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
registry_volume_claim: 'registry-claim'
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index d73c290ff..48f53aef8 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -137,7 +137,7 @@
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
-- when: openshift_hosted_registry_wait
+- when: openshift_hosted_registry_wait | bool
block:
- name: Ensure OpenShift registry correctly rolls out (best-effort today)
command: |
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index a8a6f6fc8..434b679df 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -1,7 +1,7 @@
---
- name: Configure facts for docker-registry
set_fact:
- openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routecertificates, {}) }}"
+ openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 68ec7233e..2a42b5a7c 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -94,7 +94,7 @@
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
-- when: openshift_hosted_router_wait
+- when: openshift_hosted_router_wait | bool
block:
- name: Ensure OpenShift router correctly rolls out (best-effort today)
command: |
diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
index 11478263c..72754df2e 100644
--- a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
index 0e3d006a7..298f8039e 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
index 28feac4e6..dace26793 100644
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 8bf98ba41..f821efd6b 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index bbaf76c17..019d836fe 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
index 80cc4233b..6811ece28 100644
--- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_NAME}
+ name: ${IMAGE_NAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index a77df9986..de5e25061 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -134,6 +134,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -165,6 +166,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
index 97525479e..95bf462d1 100644
--- a/roles/openshift_logging_curator/vars/main.yml
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_curator_version: "3_5"
-__allowed_curator_versions: ["3_5", "3_6"]
+__latest_curator_version: "3_6"
+__allowed_curator_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index 20fa63543..09e2ee4d0 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -1,6 +1,6 @@
---
-__latest_es_version: "3_5"
-__allowed_es_versions: ["3_5", "3_6"]
+__latest_es_version: "3_6"
+__allowed_es_versions: ["3_5", "3_6", "3_7"]
__allowed_es_types: ["data-master", "data-client", "master", "client"]
__es_log_appenders: ['file', 'console']
__kibana_index_modes: ["unique", "shared_ops"]
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
index ec8e565c3..92a426952 100644
--- a/roles/openshift_logging_fluentd/vars/main.yml
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -1,5 +1,5 @@
---
-__latest_fluentd_version: "3_5"
-__allowed_fluentd_versions: ["3_5", "3_6"]
+__latest_fluentd_version: "3_6"
+__allowed_fluentd_versions: ["3_5", "3_6", "3_7"]
__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
__allowed_mux_client_modes: ["minimal", "maximal"]
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
index 87b281c4b..241877a02 100644
--- a/roles/openshift_logging_kibana/vars/main.yml
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_kibana_version: "3_5"
-__allowed_kibana_versions: ["3_5", "3_6"]
+__latest_kibana_version: "3_6"
+__allowed_kibana_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
index 4234b74e2..e7b57f4b5 100644
--- a/roles/openshift_logging_mux/vars/main.yml
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_mux_version: "3_5"
-__allowed_mux_versions: ["3_5", "3_6"]
+__latest_mux_version: "3_6"
+__allowed_mux_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 7ccc2fc3b..f142f89f0 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -3,6 +3,9 @@ manage_iq_tasks:
- resource_kind: role
resource_name: admin
user: management-admin
+- resource_kind: role
+ resource_name: admin
+ user: system:serviceaccount:management-infra:management-admin
- resource_kind: cluster-role
resource_name: management-infra-admin
user: system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 71bb09a76..4c8d6fdad 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -24,7 +24,7 @@ oreg_url: ''
oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
-
+l_bind_docker_reg_auth: False
# NOTE
# r_openshift_master_*_default may be defined external to this role.
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index b0237141b..a657668a9 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -14,19 +14,3 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_master_facts
-- role: openshift_hosted_facts
-- role: openshift_master_certificates
-- role: openshift_etcd_client_certificates
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config | default([]) | length != 0
-- role: openshift_clock
-- role: openshift_cloud_provider
-- role: openshift_builddefaults
-- role: openshift_buildoverrides
-- role: nickhammond.logrotate
-- role: contiv
- contiv_role: netmaster
- when: openshift_use_contiv | default(False) | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 373ec0806..6203bfc7b 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -177,6 +177,8 @@
local_facts:
no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
+- include: registry_auth.yml
+
- name: Install the systemd units
include: systemd_units.yml
@@ -251,22 +253,6 @@
- restart master controllers
when: openshift_master_bootstrap_enabled | default(False)
-- name: Check for credentials file for registry auth
- stat:
- path: "{{oreg_auth_credentials_path }}"
- when:
- - oreg_auth_user is defined
- register: master_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart master api
- - restart master controllers
-
- include: set_loopback_context.yml
when:
- openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
new file mode 100644
index 000000000..96b6c614e
--- /dev/null
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -0,0 +1,27 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: master_oreg_auth_credentials_stat
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart master api
+ - restart master controllers
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart master api
+ - restart master controllers
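The new registry_auth.yml is driven entirely by the oreg_* variables already referenced in this role's defaults. A minimal sketch of those inventory values, with placeholder registry host and credentials:

    # illustrative placeholders only
    oreg_url: registry.example.com/openshift3/ose-${component}:${version}
    oreg_auth_user: pull-user
    oreg_auth_password: pull-password
    oreg_auth_credentials_replace: False   # set True to force a fresh docker login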
diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml
new file mode 100644
index 000000000..1ab105808
--- /dev/null
+++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml
@@ -0,0 +1,8 @@
+---
+- yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: 'etcdClientInfo.urls'
+ value: "{{ openshift.master.etcd_urls }}"
+ notify:
+ - restart master api
+ - restart master controllers
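The yedit task above edits a single key in the master configuration. The fragment it manages would look roughly like this, with illustrative etcd hosts and certificate names:

    # /etc/origin/master/master-config.yaml (excerpt, hosts are placeholders)
    etcdClientInfo:
      ca: master.etcd-ca.crt
      certFile: master.etcd-client.crt
      keyFile: master.etcd-client.key
      urls:
        - https://etcd1.example.com:2379
        - https://etcd2.example.com:2379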
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index f06448d71..a184a59f6 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,17 @@ Requires={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+ --name {{ openshift.common.service_type }}-master-api \
+ --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \
+ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+ -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \
+ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
+ --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index b7f36491b..2ded05f53 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,7 +11,17 @@ PartOf={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host \
+ --name {{ openshift.common.service_type }}-master-controllers \
+ --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \
+ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
+ --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index e767772ce..56c864ec7 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -380,11 +380,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
if 'extra_authorize_parameters' in self._idp:
self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
- if 'extraAuthorizeParameters' in self._idp:
- if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
- val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
- self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
-
def validate(self):
''' validate this idp instance '''
IdentityProviderOauthBase.validate(self)
diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml
index 2e2013d40..d6756f9b9 100644
--- a/roles/openshift_metrics/tasks/pre_install.yaml
+++ b/roles/openshift_metrics/tasks/pre_install.yaml
@@ -10,7 +10,7 @@
is invalid, must be one of: emptydir, pv, dynamic
when:
- openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types
- - "not {{ openshift_metrics_heapster_standalone | bool }}"
+ - not (openshift_metrics_heapster_standalone | bool)
- name: list existing secrets
command: >
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index f1e64f3aa..433e92201 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -60,7 +60,7 @@ openshift_deployment_type: origin
openshift_node_bootstrap: False
r_openshift_node_os_firewall_deny: []
-r_openshift_node_os_firewall_allow:
+default_r_openshift_node_os_firewall_allow:
- service: Kubernetes kubelet
port: 10250/tcp
- service: http
@@ -79,12 +79,14 @@ r_openshift_node_os_firewall_allow:
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
cond: "{{ openshift_node_port_range is defined }}"
+# Allow multiple port ranges to be added to the role
+r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}"
oreg_url: ''
oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
-
+l_bind_docker_reg_auth: False
# NOTE
# r_openshift_node_*_default may be defined external to this role.
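openshift_node_open_ports is unioned into the default allow list above, so each entry follows the same service/port shape. An illustrative inventory value, assuming a site that needs extra node ports opened:

    # illustrative only
    openshift_node_open_ports:
      - service: router stats
        port: "1936/tcp"
      - service: custom nodeport range
        port: "32000-32100/tcp"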
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 855b0a8d8..25a6fc721 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -29,8 +29,5 @@
- not (node_service_status_changed | default(false) | bool)
- not openshift_node_bootstrap
-- name: reload sysctl.conf
- command: /sbin/sysctl -p
-
- name: reload systemd units
command: systemctl daemon-reload
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 7af3f54b5..2759188f3 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,18 +2,6 @@
- name: Install the systemd units
include: systemd_units.yml
-- name: Check for tuned package
- command: rpm -q tuned
- args:
- warn: no
- register: tuned_installed
- changed_when: false
- failed_when: false
-
-- name: Set atomic-guest tuned profile
- command: "tuned-adm profile atomic-guest"
- when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
-
- name: Start and enable openvswitch service
systemd:
name: openvswitch.service
@@ -107,5 +95,9 @@
msg: Node failed to start please inspect the logs and try again
when: node_start_result | failed
+- name: Setup tuned
+ include: tuned.yml
+ static: yes
+
- set_fact:
node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 02b8ee67c..265bf2c46 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -1,11 +1,9 @@
---
-# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
-# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
- when: not openshift.common.is_containerized | bool
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- name: Install sdn-ovs package
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 22ff6dfd2..ef79b6ac0 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -59,38 +59,22 @@
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
-#
-# Use lineinfile w/ a handler for this task until
-# https://github.com/ansible/ansible/pull/24277 is included in an
-# ansible release and we can use the sysctl module.
-- name: Persist net.ipv4.ip_forward sysctl entry
- lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1'
- notify:
- - reload sysctl.conf
+- sysctl:
+ name: net.ipv4.ip_forward
+ value: 1
+ sysctl_file: "/etc/sysctl.d/99-openshift.conf"
+ reload: yes
- name: include bootstrap node config
include: bootstrap.yml
when: openshift_node_bootstrap
+- include: registry_auth.yml
+
- name: include standard node config
include: config.yml
when: not openshift_node_bootstrap
-- name: Check for credentials file for registry auth
- stat:
- path: "{{oreg_auth_credentials_path }}"
- when:
- - oreg_auth_user is defined
- register: node_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- notify:
- - restart node
-
- name: Configure AWS Cloud Provider Settings
lineinfile:
dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
new file mode 100644
index 000000000..f370bb260
--- /dev/null
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -0,0 +1,25 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ oreg_auth_credentials_path }}"
+ when: oreg_auth_user is defined
+ register: node_oreg_auth_credentials_stat
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+ set_fact:
+ l_bind_docker_reg_auth: True
+ when:
+ - openshift.common.is_containerized | bool
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart node
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart node
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 57094f28e..4ab10b95f 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -21,7 +21,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
+ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
+ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
+ -e HOST=/rootfs -e HOST_ETC=/host-etc \
+ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+ -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
+ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
+ -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
+ -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
+ -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
+ -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
+ -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
+ -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+ -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
+ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
+ {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 18d6a1645..5aa8aecec 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -11,7 +11,7 @@ openshift_prometheus_node_selector: {"region":"infra"}
openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
-openshift_prometheus_image_alertbuffer: "ilackarms/message-buffer"
+openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 93bdda3e8..a9bce2fb1 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -107,7 +107,10 @@
- name: annotate prometheus service
command: >
{{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
- service prometheus 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls'
+ service prometheus
+ prometheus.io/scrape='true'
+ prometheus.io/scheme=https
+ service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls
- name: annotate alerts service
command: >
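After the expanded annotate command runs, the prometheus Service metadata would carry roughly the following annotations (a sketch; only the keys and values come from the command above):

    metadata:
      name: prometheus
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/scheme: "https"
        service.alpha.openshift.io/serving-cert-secret-name: prometheus-tls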
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 59ce505d3..47d7be05a 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -12,6 +12,27 @@
deployment_type is deprecated in favor of openshift_deployment_type.
Please specify only openshift_deployment_type, or make both the same.
+# osm_cluster_network_cidr, osm_host_subnet_length and openshift_portal_net are
+# now required to avoid changes that may occur between releases
+#
+# Note: We will skip these checks when some tests run which don't
+# actually do any installing/upgrading/scaling/etc.
+# Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023
+- when:
+ - not testing_skip_some_requirements|default(False)|bool
+ assert:
+ that:
+ - "osm_cluster_network_cidr is defined"
+ - "osm_host_subnet_length is defined"
+ - "openshift_portal_net is defined"
+ msg: >
+ osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory
+ variables. If you are upgrading or scaling up, these variables should match what is currently used
+ in the cluster. If you don't remember what these values are, you can find them in
+ /etc/origin/master/master-config.yaml on a master with the names clusterNetworkCIDR
+ (osm_cluster_network_cidr), hostSubnetLength (osm_host_subnet_length),
+ and serviceNetworkCIDR (openshift_portal_net).
+
- name: Standardize on latest variable names
set_fact:
# goal is to deprecate deployment_type in favor of openshift_deployment_type.
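The new assert makes these three network variables mandatory. An illustrative inventory snippet using common 3.x defaults; when upgrading or scaling, the values must match the running cluster as described in the message above:

    # illustrative values only
    osm_cluster_network_cidr: 10.128.0.0/14   # clusterNetworkCIDR
    osm_host_subnet_length: 9                 # hostSubnetLength
    openshift_portal_net: 172.30.0.0/16       # serviceNetworkCIDR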
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
index 1f25cc39f..16a307c06 100644
--- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
+++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
@@ -1,2 +1 @@
window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true;
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true;
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index a059745a6..d0bc0e028 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -76,10 +76,11 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| Name | Default value | Description |
|--------------------------------------------------|-------------------------|-----------------------------------------|
| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
@@ -91,7 +92,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service.
+| openshift_storage_glusterfs_heketi_url            | Undefined               | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address of the heketi service.
| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
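A short inventory sketch tying the new variable to the existing heketi settings, assuming an external heketi service with placeholder values:

    # illustrative only
    openshift_storage_glusterfs_use_default_selector: True
    openshift_storage_glusterfs_heketi_url: heketi.example.com
    openshift_storage_glusterfs_heketi_port: 8080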
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 0b3d3aef1..148549887 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -3,6 +3,7 @@ openshift_storage_glusterfs_timeout: 300
openshift_storage_glusterfs_is_native: True
openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
+openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
@@ -31,6 +32,7 @@ openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.na
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
+openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
@@ -58,9 +60,9 @@ r_openshift_storage_glusterfs_os_firewall_deny: []
r_openshift_storage_glusterfs_os_firewall_allow:
- service: glusterfs_sshd
port: "2222/tcp"
-- service: glusterfs_daemon
- port: "24007/tcp"
- service: glusterfs_management
+ port: "24007/tcp"
+- service: glusterfs_rdma
port: "24008/tcp"
- service: glusterfs_bricks
port: "49152-49251/tcp"
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
new file mode 100644
index 000000000..9ebb0d5ec
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
@@ -0,0 +1,143 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: topology
+ mountPath: ${TOPOLOGY_PATH}
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: topology
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-topology-secret
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
+- name: TOPOLOGY_PATH
+ displayName: heketi topology file location
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index a31c5bd5e..bc0dde17d 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -15,6 +15,7 @@
oc_project:
state: present
name: "{{ glusterfs_namespace }}"
+ node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}"
when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass
- name: Delete pre-existing heketi resources
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 7a2987883..012c722ff 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -5,6 +5,7 @@
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+ glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 17f87578d..1bcab8e49 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -5,6 +5,7 @@
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+ glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
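Rendered with illustrative values (a native heketi route and a defined admin key), the template above produces roughly:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: glusterfs-storage
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://heketi-glusterfs.apps.example.com"
      restuser: "admin"
      secretNamespace: "glusterfs"
      secretName: "heketi-storage-admin-secret"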
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ }
+ }
+}
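For reference, a sketch of how the glusterfs section of heketi.json might render when the ssh executor is selected; the port, user, and sudo values here are assumptions, not defaults established by this change:

  "glusterfs" : {
    "executor" : "ssh",
    "db" : "/var/lib/heketi/heketi.db",
    "sshexec" : {
      "keyfile" : "/etc/heketi/private_key",
      "port" : "22",
      "user" : "root",
      "sudo" : false
    }
  }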
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
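As a sanity check on the grouping logic above, a hand-rendered sketch of the topology produced for a hypothetical two-node inventory where neither host sets glusterfs_cluster (so both land in the default cluster '1'); hostnames, IPs, zones, and device paths are invented:

{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [ "node-1.example.com" ],
              "storage": [ "192.0.2.11" ]
            },
            "zone": 1
          },
          "devices": [ "/dev/sdb" ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [ "node-2.example.com" ],
              "storage": [ "192.0.2.12" ]
            },
            "zone": 2
          },
          "devices": [ "/dev/sdb", "/dev/sdc" ]
        }
      ]
    }
  ]
}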
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 01a1a7472..53d10f1f8 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,2 +1,3 @@
---
openshift_protect_installed_version: True
+version_install_base_package: False
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 204abe27e..1ff99adf8 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,11 +5,15 @@
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
+# This is only needed on masters and nodes; version_install_base_package
+# should be set by a play externally.
- name: Install the base package for versioning
package:
name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- when: not is_containerized | bool
+ when:
+ - not is_containerized | bool
+ - version_install_base_package | bool
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
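Because the base-package install is now gated on version_install_base_package (default False), a calling play has to opt in explicitly; a minimal sketch of such a play, with the play name and host groups as illustrative assumptions:

- name: Determine openshift_version
  hosts: oo_masters_to_config:oo_nodes_to_config
  roles:
  - role: openshift_version
    version_install_base_package: True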
diff --git a/roles/os_firewall/tasks/iptables.yml b/roles/os_firewall/tasks/iptables.yml
index 0af5abf38..2d74f2e48 100644
--- a/roles/os_firewall/tasks/iptables.yml
+++ b/roles/os_firewall/tasks/iptables.yml
@@ -33,7 +33,7 @@
register: result
delegate_to: "{{item}}"
run_once: true
- with_items: "{{ ansible_play_hosts }}"
+ with_items: "{{ ansible_play_batch }}"
- name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
pause:
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 453044a6e..2a2cf40f3 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -41,15 +41,19 @@
redhat_subscription:
username: "{{ rhel_subscription_user }}"
password: "{{ rhel_subscription_pass }}"
+ register: rh_subscription
+ until: rh_subscription | succeeded
- name: Retrieve the OpenShift Pool ID
command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_id
+ until: openshift_pool_id | succeeded
changed_when: False
- name: Determine if OpenShift Pool Already Attached
command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only
register: openshift_pool_attached
+ until: openshift_pool_attached | succeeded
changed_when: False
when: openshift_pool_id.stdout == ''
@@ -59,6 +63,8 @@
- name: Attach to OpenShift Pool
command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }}
+ register: subscribe_pool
+ until: subscribe_pool | succeeded
when: openshift_pool_id.stdout != ''
- include: enterprise.yml
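The register/until additions above lean on Ansible's implicit retry behavior (3 retries, 5 seconds apart, when retries and delay are omitted); an equivalent sketch with those knobs spelled out, where the retry values are arbitrary illustrations rather than part of this change:

- name: Retrieve the OpenShift Pool ID
  command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only
  register: openshift_pool_id
  until: openshift_pool_id | succeeded
  retries: 3
  delay: 5
  changed_when: False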