-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  callback_plugins/aa_version_requirement.py | 2
-rw-r--r--  files/origin-components/apiserver-template.yaml | 3
-rw-r--r--  inventory/hosts.example | 6
-rw-r--r--  openshift-ansible.spec | 41
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluders.yml (renamed from playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml) | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh | 25
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 57
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml | 77
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml | 93
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 33
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 99
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 102
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 99
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 103
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 106
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 99
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 103
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 106
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml | 99
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 109
-rw-r--r--  playbooks/init/main.yml | 2
-rw-r--r--  playbooks/init/repos.yml | 8
-rw-r--r--  playbooks/init/validate_hostnames.yml | 34
-rw-r--r--  playbooks/openshift-master/private/config.yml | 1
-rw-r--r--  playbooks/openshift-node/private/restart.yml | 1
-rw-r--r--  playbooks/prerequisites.yml | 2
-rw-r--r--  requirements.txt | 2
-rw-r--r--  roles/container_runtime/defaults/main.yml | 2
-rw-r--r--  roles/container_runtime/tasks/docker_upgrade_check.yml | 67
-rw-r--r--  roles/container_runtime/templates/crio.conf.j2 | 2
-rw-r--r--  roles/contiv/meta/main.yml | 2
-rw-r--r--  roles/contiv/tasks/default_network.yml | 13
-rw-r--r--  roles/contiv/tasks/netmaster_iptables.yml | 8
-rw-r--r--  roles/contiv/tasks/pkgMgrInstallers/centos-install.yml | 2
-rw-r--r--  roles/contiv/templates/aci-gw.service | 5
-rw-r--r--  roles/contiv/templates/netmaster.service | 5
-rw-r--r--  roles/contiv/templates/netplugin.service | 5
-rw-r--r--  roles/contiv_facts/tasks/rpm.yml | 9
-rw-r--r--  roles/etcd/defaults/main.yaml | 2
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
l---------  roles/openshift_examples/files/examples/latest | 1
l---------  roles/openshift_examples/files/examples/v3.7/v3.8 | 1
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json | 3
l---------  roles/openshift_examples/files/examples/v3.9/latest | 1
-rw-r--r--  roles/openshift_hosted/README.md | 7
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 2
-rw-r--r--  roles/openshift_hosted_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_logging/tasks/annotate_ops_projects.yaml | 19
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 18
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_fluentd/tasks/label_and_wait.yaml | 1
-rw-r--r--  roles/openshift_master/defaults/main.yml | 42
-rw-r--r--  roles/openshift_master/vars/main.yml | 41
-rw-r--r--  roles/openshift_node/handlers/main.yml | 6
-rw-r--r--  roles/openshift_node/tasks/config.yml | 4
-rw-r--r--  roles/openshift_node/tasks/container_images.yml | 20
-rw-r--r--  roles/openshift_node/tasks/dnsmasq.yml | 43
-rw-r--r--  roles/openshift_node/tasks/dnsmasq_install.yml | 43
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml | 42
-rw-r--r--  roles/openshift_node/tasks/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 23
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 159
-rw-r--r--  roles/openshift_node/tasks/upgrade/config_changes.yml | 77
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml | 14
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml | 15
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 9
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 33
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml | 19
-rw-r--r--  roles/openshift_node/tasks/upgrade/stop_services.yml | 43
-rw-r--r--  roles/openshift_node/tasks/upgrade_pre.yml | 56
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_prometheus/README.md | 2
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 3
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 7
-rw-r--r--  roles/openshift_repos/tasks/rhel_repos.yml | 34
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 17
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 | 4
-rw-r--r--  roles/openshift_storage_nfs/meta/main.yml | 2
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 18
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 64
-rw-r--r--  roles/rhel_subscribe/tasks/satellite.yml | 5
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 1
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 3
126 files changed, 1158 insertions(+), 1834 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 822a6dca2..1ca23082d 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.6.0 ./
+3.9.0-0.10.0 ./
diff --git a/callback_plugins/aa_version_requirement.py b/callback_plugins/aa_version_requirement.py
index 110b3d673..1093acdae 100644
--- a/callback_plugins/aa_version_requirement.py
+++ b/callback_plugins/aa_version_requirement.py
@@ -29,7 +29,7 @@ else:
# Set to minimum required Ansible version
-REQUIRED_VERSION = '2.4.0.0'
+REQUIRED_VERSION = '2.4.1.0'
DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
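
The bump above means the aa_version_requirement callback aborts runs on Ansible older than 2.4.1.0, matching the requirements.txt change in this patch. A quick pre-flight check on the control host (a minimal sketch):

    # Should report 2.4.1.0 or newer, or the callback will fail the run
    ansible --version | head -1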
diff --git a/files/origin-components/apiserver-template.yaml b/files/origin-components/apiserver-template.yaml
index 1b42597af..035e4734b 100644
--- a/files/origin-components/apiserver-template.yaml
+++ b/files/origin-components/apiserver-template.yaml
@@ -15,6 +15,8 @@ parameters:
apiVersion: config.templateservicebroker.openshift.io/v1
templateNamespaces:
- openshift
+- name: NODE_SELECTOR
+ value: "{}"
objects:
# to create the tsb server
@@ -59,6 +61,7 @@ objects:
path: /healthz
port: 8443
scheme: HTTPS
+ nodeSelector: "${{NODE_SELECTOR}}"
volumes:
- name: serving-cert
secret:
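
In OpenShift templates the ${{PARAM}} form substitutes the parameter as structured JSON rather than a quoted string, which is what lets NODE_SELECTOR carry a selector map (and default to the empty map "{}"). A sketch of consuming the new parameter; the selector value is illustrative, not part of this change:

    # Pin the template service broker apiserver to labeled nodes
    oc process -f files/origin-components/apiserver-template.yaml \
        -p NODE_SELECTOR='{"region": "infra"}' \
      | oc apply -f -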
diff --git a/inventory/hosts.example b/inventory/hosts.example
index c18a53671..d857cd1a7 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -641,6 +641,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
#openshift_prometheus_storage_type='pvc'
+#openshift_prometheus_storage_class=glusterfs-storage
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
@@ -650,6 +651,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
#openshift_prometheus_alertmanager_storage_type='pvc'
+#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
@@ -659,6 +661,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
#openshift_prometheus_alertbuffer_storage_type='pvc'
+#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -672,6 +675,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_storage_volume_size=10Gi
#openshift_prometheus_storage_labels={'storage': 'prometheus'}
#openshift_prometheus_storage_type='pvc'
+#openshift_prometheus_storage_class=glusterfs-storage
# For prometheus-alertmanager
#openshift_prometheus_alertmanager_storage_kind=nfs
#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
@@ -681,6 +685,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertmanager_storage_volume_size=10Gi
#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
#openshift_prometheus_alertmanager_storage_type='pvc'
+#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
# For prometheus-alertbuffer
#openshift_prometheus_alertbuffer_storage_kind=nfs
#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
@@ -690,6 +695,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
#openshift_prometheus_alertbuffer_storage_type='pvc'
+#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
#
# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
# which are destroyed when pods are deleted
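
Each new *_storage_class setting names an existing StorageClass for the PVC-backed Prometheus components. Before enabling them, it is worth confirming the class actually exists in the cluster (a sketch, using the example value from the inventory above):

    # Fails if the storage class has not been created yet
    oc get storageclass glusterfs-storage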
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index ff5107d98..0d5964dda 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.6.0%{?dist}
+Release: 0.10.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -285,6 +285,45 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Dec 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.10.0
+- Bump requirements.txt to Ansible 2.4.1 (rteague@redhat.com)
+- Commit to stabalize RHSM operations. This code is derived from contrib
+ (mazzystr@gmail.com)
+- Contiv systemd fixes (flamingo@2thebatcave.com)
+- Combine openshift_master/vars with defaults (mgugino@redhat.com)
+- crio: change socket path to /var/run/crio/crio.sock (gscrivan@redhat.com)
+- Remove version requirement from openvswitch package, since listed version got
+ removed from repo (riffraff@hobbes.alephone.org)
+
+* Thu Dec 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.9.0
+- etcd: use Fedora /latest/ instead of hardcoding the version
+ (gscrivan@redhat.com)
+- docker: use Fedora /latest/ instead of hardcoding the version
+ (gscrivan@redhat.com)
+- upgrade node mark 2 (mgugino@redhat.com)
+- Refactor node upgrade to include less serial tasks (mgugino@redhat.com)
+- fix 1519808. Only annotate ops projects when openshift_logging_use_ops=true
+ (jcantril@redhat.com)
+- Ensure that clients are version bound (sdodson@redhat.com)
+- Support for making glusterfs storage class a default one.
+ (jmencak@redhat.com)
+- Add support for storage classes to openshift_prometheus role.
+ (jmencak@redhat.com)
+- Do not escalate privileges in logging stack deployment task
+ (iacopo.rozzo@amadeus.com)
+- Multimaster openshift+contiv fixes (landillo@cisco.com)
+- Sync latest image-streams and templates (alexandre.lossent@cern.ch)
+
+* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.8.0
+- Remove empty openshift_hosted_facts role (mgugino@redhat.com)
+- Refactor upgrade codepaths step 1 (mgugino@redhat.com)
+
+* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.7.0
+- Remove bad openshift_examples symlink (rteague@redhat.com)
+- Changing the node group format to a list. (kwoodson@redhat.com)
+- Bump RPM version requirement (sdodson@redhat.com)
+- Clarify version selection in README (mgugino@redhat.com)
+
* Tue Dec 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.6.0
- add openshift_master_api_port var to example inventory (jdiaz@redhat.com)
- Allow 2 sets of hostnames for openstack provider (bdobreli@redhat.com)
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 5a877809a..3cb11a457 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -8,6 +8,7 @@
when:
- deployment_type == 'openshift-enterprise'
- ansible_distribution == "RedHat"
- - lookup('env', 'rhel_skip_subscription') | default(rhsub_skip, True) | default('no', True) | lower in ['no', 'false']
+ - rhsub_user | default(False)
+ - rhsub_pass | default(False)
- role: openshift_repos
- role: os_update_latest
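
With the lookup-based skip logic replaced, the subscription roles now run only when both rhsub_user and rhsub_pass are supplied. A sketch of an invocation; the inventory path and credentials are placeholders:

    ansible-playbook -i inventory/hosts playbooks/byo/rhel_subscribe.yml \
        -e rhsub_user=example-user -e rhsub_pass='example-password'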
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
index 6e953be69..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -1,22 +1 @@
---
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Remove unused Docker images for Docker 1.10+ migration
- shell: "docker rmi `docker images -aq`"
- # Will fail on images still in use:
- failed_when: false
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
index 33ed6a283..858912379 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
@@ -1,6 +1,6 @@
---
- name: Disable excluders
- hosts: oo_masters_to_config
+ hosts: "{{ l_upgrade_excluder_hosts }}"
gather_facts: no
roles:
- role: openshift_excluder
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
deleted file mode 100644
index ab3171c9a..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_nodes_to_upgrade:!oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_verify_upgrade: true
- r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
- r_openshift_excluder_package_state: latest
- r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index fcb828808..38aa9df47 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -19,7 +19,9 @@
msg: Cannot upgrade Docker on Atomic operating systems.
when: openshift.common.is_atomic | bool
- - include_tasks: upgrade_check.yml
+ - include_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
when: docker_upgrade is not defined or docker_upgrade | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
- docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
- docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
- # Some layers are deleted recursively and are no longer present
- # when docker goes to remove them:
- docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 2e3a7ae8b..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,58 +1 @@
---
-
-# This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_upgrade to True if so.
-
-- set_fact:
- docker_upgrade: True
- when: docker_upgrade is not defined
-
-- name: Check if Docker is installed
- command: rpm -q docker
- args:
- warn: no
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Get current version of Docker
- command: "{{ repoquery_installed }} --qf '%{version}' docker"
- register: curr_docker_version
- retries: 4
- until: curr_docker_version | succeeded
- changed_when: false
-
-- name: Get latest available version of Docker
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "docker"
- register: avail_docker_version
- retries: 4
- until: avail_docker_version | succeeded
- # Don't expect docker rpm to be available on hosts that don't already have it installed:
- when: pkg_check.rc == 0
- failed_when: false
- changed_when: false
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- # Disable the 1.12 requirement if the user set a specific Docker version
- when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
-
-# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
-- set_fact:
- l_docker_upgrade: False
-
-# Make sure a docker_version is set if none was requested:
-- set_fact:
- docker_version: "{{ avail_docker_version.stdout }}"
- when: pkg_check.rc == 0 and docker_version is not defined
-
-- name: Flag for Docker upgrade if necessary
- set_fact:
- l_docker_upgrade: True
- when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
-
-- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
- set_fact:
- docker_upgrade_nuke_images: True
- when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 5454a6680..8ee83819e 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -8,7 +8,7 @@
- import_playbook: ../../../init/facts.yml
- name: Ensure firewall is not switched during upgrade
- hosts: oo_all_hosts
+ hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
vars:
openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
new file mode 100644
index 000000000..d5b82d9a0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -0,0 +1,77 @@
+---
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+
+- import_playbook: verify_cluster.yml
+
+- name: Update repos on upgrade hosts
+ hosts: "{{ l_upgrade_repo_hosts }}"
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: "{{ l_upgrade_no_proxy_hosts }}"
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- name: OpenShift Health Checks
+ hosts: "{{ l_upgrade_health_check_hosts }}"
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - docker_image_availability
+
+- import_playbook: ../disable_excluders.yml
+
+- import_playbook: ../../../../init/version.yml
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+# If we're only upgrading nodes, we need to ensure masters are already upgraded
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when:
+ - l_upgrade_nodes_only | default(False) | bool
+ - openshift.common.version != openshift_version
+
+# If we're only upgrading nodes, skip this.
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+ when: not (l_upgrade_nodes_only | default(False)) | bool
+
+- name: Verify upgrade targets
+ hosts: "{{ l_upgrade_verify_targets_hosts }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - include_tasks: verify_upgrade_targets.yml
+
+- name: Verify docker upgrade targets
+ hosts: "{{ l_upgrade_docker_target_hosts }}"
+ tasks:
+ - include_role:
+ name: container_runtime
+ tasks_from: docker_upgrade_check.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
deleted file mode 100644
index 8ecae4539..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
deleted file mode 100644
index 18a08eb99..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Only check if docker upgrade is required if docker_upgrade is not
-# already set to False.
-- include_tasks: ../../docker/upgrade_check.yml
- when:
- - docker_upgrade is not defined or (docker_upgrade | bool)
- - not (openshift.common.is_atomic | bool)
-
-# Additional checks for Atomic hosts:
-
-- name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
-- set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
-- fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
new file mode 100644
index 000000000..2ab9f852c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -0,0 +1,93 @@
+---
+# Verify a few items before we proceed with upgrade process.
+
+- name: Verify upgrade can proceed on first master
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin and openshift-enterprise
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise']
+
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
+
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - lib_utils
+ - openshift_facts
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+ # assuming the master-config.yml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
+
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - when: openshift.common.is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master"
+
+ # In case of the non-ha to ha upgrade.
+ - name: Check if the {{ openshift_service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
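
For reference, the yedit task above reads kubernetesMasterConfig.apiServerArguments.storage-backend from /etc/origin/master/master-config.yaml and fails unless it is the list ["etcd3", ...]. A standalone spot-check of the same key might look like this (a sketch; assumes PyYAML is available on the master):

    python -c '
    import yaml
    cfg = yaml.safe_load(open("/etc/origin/master/master-config.yaml"))
    # Same key path the playbook checks; tolerate missing intermediate keys
    args = (cfg.get("kubernetesMasterConfig") or {}).get("apiServerArguments") or {}
    backend = (args.get("storage-backend") or [""])[0]
    assert backend == "etcd3", "storage-backend must be etcd3, got %r" % backend
    print("storage backend OK: %s" % backend)
    '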
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
deleted file mode 100644
index bef95546d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify master processes
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
- - when: openshift.common.is_containerized | bool
- block:
- - set_fact:
- master_services:
- - "{{ openshift_service_type }}-master"
-
- # In case of the non-ha to ha upgrade.
- - name: Check if the {{ openshift_service_type }}-master-api.service exists
- command: >
- systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
- register: master_api_service_status
-
- - set_fact:
- master_services:
- - "{{ openshift_service_type }}-master-api"
- - "{{ openshift_service_type }}-master-controllers"
- when:
- - master_api_service_status.stdout_lines | length > 0
- - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
-
- - name: Ensure Master is running
- service:
- name: "{{ item }}"
- state: started
- enabled: yes
- with_items: "{{ master_services }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
deleted file mode 100644
index f75ae3b15..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Verify all masters has etcd3 storage backend set
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - lib_utils
- tasks:
- - name: Read master storage backend setting
- yedit:
- state: list
- src: /etc/origin/master/master-config.yaml
- key: kubernetesMasterConfig.apiServerArguments.storage-backend
- register: _storage_backend
-
- - fail:
- msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
- when:
- # assuming the master-config.yml is properly configured, i.e. the value is a list
- - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
-
- - debug:
- msg: "Storage backend is set to etcd3"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
deleted file mode 100644
index 2a8de50a2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: OpenShift Health Checks
- hosts: oo_all_hosts
- any_errors_fatal: true
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: upgrade
- post_tasks:
- - name: Run health checks (upgrade)
- action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - docker_image_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
deleted file mode 100644
index 3c0017891..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify upgrade can proceed on first master
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for origin and openshift-enterprise
- deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
-
- # Error out in situations where the user has older versions specified in their
- # inventory in any of the openshift_release, openshift_image_tag, and
- # openshift_pkg_version variables. These must be removed or updated to proceed
- # with upgrade.
- # TODO: Should we block if you're *over* the next major release version as well?
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - fail:
- msg: >
- openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
-
- - fail:
- msg: >
- openshift_release is {{ openshift_release }} which is not a
- valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index f7a85545b..a3cb1d0f9 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,16 +1,25 @@
---
+- name: Prepull images and rpms before doing rolling restart
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ roles:
+ - role: openshift_facts
+ tasks:
+ - include_role:
+ name: openshift_node
+ tasks_from: upgrade_pre.yml
+ vars:
+ openshift_node_upgrade_in_progress: True
+
- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade:!oo_masters_to_config
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
+ roles:
+ - lib_openshift
+ - openshift_facts
pre_tasks:
- - name: Load lib_openshift modules
- import_role:
- name: lib_openshift
-
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -33,18 +42,12 @@
retries: 60
delay: 60
- roles:
- - openshift_facts
post_tasks:
- include_role:
name: openshift_node
tasks_from: upgrade.yml
vars:
openshift_node_upgrade_in_progress: True
- - include_role:
- name: openshift_excluder
- vars:
- r_openshift_excluder_action: enable
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
@@ -55,3 +58,11 @@
register: node_schedulable
until: node_schedulable|succeeded
when: node_unschedulable|changed
+
+- name: Re-enable excluders
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ tasks:
+ - include_role:
+ name: openshift_excluder
+ vars:
+ r_openshift_excluder_action: enable
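
As the comment in the play notes, the batching variables must be passed with -e because they gate `serial` and are evaluated before host vars. A sketch of a rolling upgrade over 20% of nodes at a time, tolerating no failures (the byo wrapper path is assumed here):

    ansible-playbook -i inventory/hosts \
        playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml \
        -e openshift_upgrade_nodes_serial='20%' \
        -e openshift_upgrade_nodes_max_fail_percentage=0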
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 9f9399ff9..a5ad3801d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -15,99 +15,24 @@
openshift_upgrade_target: '3.6'
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 7374160d6..1498db4c5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -12,106 +12,34 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.6'
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index de9bf098e..6958652d8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.6'
openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 9ec788e76..4daa9e490 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -15,103 +15,24 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index ad67b6c44..1750148d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -12,110 +12,34 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
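
Downstream plays can key off the pre_upgrade_complete fact to skip hosts that dropped out during the checks. A hypothetical guard; only the fact name is taken from this patch:

    - name: Refuse to upgrade hosts that never finished pre-upgrade checks
      hosts: oo_masters_to_config:oo_etcd_to_config
      tasks:
      - fail:
          msg: "Pre-upgrade checks did not complete on this host."
        when: pre_upgrade_complete is not defined
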
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index 27a7f67ea..16d95514c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.7'
openshift_upgrade_min: '3.6'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
- import_playbook: ../upgrade_nodes.yml
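
The excluder pattern above relies on Ansible host-pattern syntax: ':' unions groups and ':!' subtracts one, so oo_nodes_to_config:!oo_masters_to_config selects dedicated nodes while leaving colocated masters untouched during a nodes-only run. A self-contained illustration of the pattern:

    - name: Target only nodes that are not also masters
      hosts: "oo_nodes_to_config:!oo_masters_to_config"
      gather_facts: no
      tasks:
      - debug:
          msg: "{{ inventory_hostname }} is a dedicated node"
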
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 60ec79df5..0f74e0137 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -15,103 +15,24 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index c1a3f64f2..08bfd239f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -12,110 +12,34 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index dd716b241..b5f1038fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -17,93 +17,22 @@
openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 1e704b66c..0aea5069d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -3,121 +3,32 @@
# Full Control Plane + Nodes Upgrade
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_all_hosts
- tags:
- - pre_upgrade
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - import_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include_tasks: ../cleanup_unused_images.yml
+ - set_fact:
+ pre_upgrade_complete: True
+
+# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index a9689da1f..05aa737c6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -12,116 +12,35 @@
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
+ vars:
+ l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
-- name: Verify upgrade targets
- hosts: oo_masters_to_config
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
+- import_playbook: validator.yml
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: validator.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
+ - set_fact:
+ pre_upgrade_complete: True
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+# Pre-upgrade completed
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include_tasks: ../cleanup_unused_images.yml
- import_playbook: ../upgrade_control_plane.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index d95cfa4e1..1d1b255c1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -5,111 +5,30 @@
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
- import_playbook: ../init.yml
- tags:
- - pre_upgrade
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_all_hosts
- tags:
- - pre_upgrade
tasks:
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
- tags:
- - pre_upgrade
+- import_playbook: ../pre/config.yml
vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
+ l_upgrade_repo_hosts: "oo_nodes_to_config"
+ l_upgrade_no_proxy_hosts: "oo_all_hosts"
+ l_upgrade_health_check_hosts: "oo_nodes_to_config"
+ l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+ l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+ l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+ l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- name: Verify upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- name: Verify docker upgrade targets
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ - set_fact:
+ pre_upgrade_complete: True
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index b2b972a7d..06e8ba504 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -23,8 +23,6 @@
- import_playbook: validate_hostnames.yml
when: not (skip_validate_hostnames | default(False))
-- import_playbook: repos.yml
-
- import_playbook: version.yml
when: not (skip_verison | default(False))
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index a7114fc80..048b09e60 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -3,6 +3,14 @@
hosts: oo_all_hosts
gather_facts: no
tasks:
+ - name: subscribe instances to Red Hat Subscription Manager
+ include_role:
+ name: rhel_subscribe
+ when:
+ - ansible_distribution == 'RedHat'
+ - deployment_type == 'openshift-enterprise'
+ - rhsub_user | default(False)
+ - rhsub_pass | default(False)
- name: initialize openshift repos
include_role:
name: openshift_repos
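
With this change, RHEL hosts are subscribed during repo setup whenever the subscription variables are present; the ansible_distribution condition is satisfied by the host itself. A minimal group_vars sketch, with placeholder credentials:

    # group_vars sketch: all four when-conditions above must hold for the
    # rhel_subscribe role to run.
    deployment_type: openshift-enterprise
    rhsub_user: example-user      # placeholder credential
    rhsub_pass: example-password  # placeholder credential
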
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index be2e6a15a..86e0b2416 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -1,6 +1,7 @@
---
- name: Validate node hostnames
hosts: oo_nodes_to_config
+ any_errors_fatal: true
tasks:
- name: Query DNS for IP address of {{ openshift.common.hostname }}
shell:
@@ -8,16 +9,35 @@
register: lookupip
changed_when: false
failed_when: false
- - name: Warn user about bad openshift_hostname values
- pause:
- prompt:
+
+ - name: Validate openshift_hostname when defined
+ fail:
+ msg: >
The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
doesn't resolve to an IP address owned by this host. Please set
openshift_hostname variable to a hostname that when resolved on the host
- in question resolves to an IP address matching an interface on this
- host. This host will fail liveness checks for pods utilizing hostPorts,
- press ENTER to continue or CTRL-C to abort.
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+ in question resolves to an IP address matching an interface on this host.
+ This will ensure proper functionality of OpenShift networking features.
+ Inventory setting: openshift_hostname={{ openshift_hostname }}
+ This check can be overridden by setting openshift_hostname_check=false in
+ the inventory.
+ See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
when:
- lookupip.stdout != '127.0.0.1'
- lookupip.stdout not in ansible_all_ipv4_addresses
+ - openshift_hostname_check | default(true)
+
+ - name: Validate openshift_ip exists on node when defined
+ fail:
+ msg: >
+ The IP address {{ openshift_ip }} does not exist on {{ ansible_nodename }}.
+ Please set the openshift_ip variable to an IP address of this node.
+ This will ensure proper functionality of OpenShift networking features.
+ Inventory setting: openshift_ip={{ openshift_ip }}
+ This check can be overridden by setting openshift_ip_check=false in
+ the inventory.
+ See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
+ when:
+ - openshift_ip is defined
+ - openshift_ip not in ansible_all_ipv4_addresses
+ - openshift_ip_check | default(true)
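
Both validations are now hard failures rather than interactive pauses, and each has its own opt-out. A sketch of the inventory variables, using the names from the tasks above, for deliberately skipping the checks:

    # Inventory sketch: disable either validation explicitly.
    openshift_hostname_check: false
    openshift_ip_check: false
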
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 9f6d5afcc..15d301ddb 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -180,7 +180,6 @@
}}"
roles:
- role: openshift_master_facts
- - role: openshift_hosted_facts
- role: openshift_clock
- role: openshift_cloud_provider
- role: openshift_builddefaults
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 0786bd7d3..6a10585b9 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -5,6 +5,7 @@
roles:
- lib_openshift
+ - openshift_facts
tasks:
- name: Restart docker
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 7b7868cfe..5ba62a6d6 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -3,6 +3,8 @@
vars:
skip_verison: True
+- import_playbook: init/repos.yml
+
# This is required for container runtime for crio, only needs to run once.
- name: Configure os_firewall
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
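
Taken together with the init/main.yml hunk above, repository setup (and now subscription) runs once from prerequisites.yml instead of on every init pass. The top of prerequisites.yml after this patch reads approximately:

    ---
    - import_playbook: init/main.yml
      vars:
        skip_verison: True

    - import_playbook: init/repos.yml
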
diff --git a/requirements.txt b/requirements.txt
index be1bde18e..67cdaaff5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
# Versions are pinned to prevent pypi releases arbitrarily breaking
# tests with new APIs/semantics. We want to update versions deliberately.
-ansible==2.4.0.0
+ansible==2.4.1.0
boto==2.34.0
click==6.7
pyOpenSSL==16.2.0
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index d7eb8663f..dd185cb38 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -115,7 +115,7 @@ l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio
# systemcontainers_docker #
# ----------------------- #
l_crt_docker_image_prepend_dict:
- Fedora: "registry.fedoraproject.org/f25"
+ Fedora: "registry.fedoraproject.org/latest"
Centos: "docker.io/gscrivano"
RedHat: "registry.access.redhat.com/openshift3"
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
new file mode 100644
index 000000000..f29619f42
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -0,0 +1,67 @@
+---
+
+# This snippet checks the inventory variables and the available packages to
+# determine whether a Docker upgrade is required, setting l_docker_upgrade to True if so.
+
+- set_fact:
+ docker_upgrade: True
+ when: docker_upgrade is not defined
+
+- name: Check if Docker is installed
+ command: rpm -q docker
+ args:
+ warn: no
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Get current version of Docker
+ command: "{{ repoquery_installed }} --qf '%{version}' docker"
+ register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
+ changed_when: false
+
+- name: Get latest available version of Docker
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "docker"
+ register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
+ # Don't expect docker rpm to be available on hosts that don't already have it installed:
+ when: pkg_check.rc == 0
+ failed_when: false
+ changed_when: false
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ # Disable the 1.12 requirement if the user set a specific Docker version
+ when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
+
+# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
+- set_fact:
+ l_docker_upgrade: False
+
+# Make sure a docker_version is set if none was requested:
+- set_fact:
+ docker_version: "{{ avail_docker_version.stdout }}"
+ when: pkg_check.rc == 0 and docker_version is not defined
+
+- name: Flag for Docker upgrade if necessary
+ set_fact:
+ l_docker_upgrade: True
+ when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+
+# Additional checks for Atomic hosts:
+- name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+- set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
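
A typical way to consume this task file from an upgrade play, sketched under the assumption that callers use include_role with tasks_from (the actual call sites live in the upgrade playbooks above):

    - name: Check whether a docker upgrade is required
      hosts: oo_masters_to_config:oo_nodes_to_upgrade
      tasks:
      - include_role:
          name: container_runtime
          tasks_from: docker_upgrade_check.yml
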
diff --git a/roles/container_runtime/templates/crio.conf.j2 b/roles/container_runtime/templates/crio.conf.j2
index 3f066a17f..0a1ff2e0a 100644
--- a/roles/container_runtime/templates/crio.conf.j2
+++ b/roles/container_runtime/templates/crio.conf.j2
@@ -27,7 +27,7 @@ storage_option = [
[crio.api]
# listen is the path to the AF_LOCAL socket on which crio will listen.
-listen = "/var/run/crio.sock"
+listen = "/var/run/crio/crio.sock"
# stream_address is the IP address on which the stream server will listen
stream_address = ""
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index a2c2f98a7..52b9d09dd 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -21,7 +21,7 @@ dependencies:
etcd_client_port: 22379
etcd_conf_dir: /etc/contiv-etcd/
etcd_data_dir: /var/lib/contiv-etcd/
- etcd_ca_host: "{{ inventory_hostname }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_config_dir: /etc/contiv-etcd/
etcd_url_scheme: http
etcd_peer_url_scheme: http
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index f679443e0..8a928ea54 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -8,51 +8,64 @@
- name: Contiv | Set globals
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+ run_once: true
- name: Contiv | Set arp mode to flood if ACI
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
when: contiv_fabric_mode == "aci"
+ run_once: true
- name: Contiv | Check if default-net exists
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
register: net_result
+ run_once: true
- name: Contiv | Create default-net
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
when: net_result.stdout.find("default-net") == -1
+ run_once: true
- name: Contiv | Create host access infra network for VxLan routing case
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+ run_once: true
#- name: Contiv | Create an allow-all policy for the default-group
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
# when: contiv_fabric_mode == "aci"
+# run_once: true
- name: Contiv | Set up aci external contract to consume default external contract
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
- name: Contiv | Set up aci external contract to provide default external contract
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
- name: Contiv | Create aci default-group
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
when: contiv_fabric_mode == "aci"
+ run_once: true
- name: Contiv | Add external contracts to the default-group
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+ run_once: true
#- name: Contiv | Add policy rule 1 for allow-all policy
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
# when: contiv_fabric_mode == "aci"
+# run_once: true
#- name: Contiv | Add policy rule 2 for allow-all policy
# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
# when: contiv_fabric_mode == "aci"
+# run_once: true
- name: Contiv | Create default aci app profile
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
when: contiv_fabric_mode == "aci"
+ run_once: true
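
run_once matters here because every netctl invocation mutates shared, cluster-wide netmaster state; without it the same command would repeat on each host in the play. A minimal illustration of the semantics:

    - name: Demonstrate run_once semantics
      hosts: all
      gather_facts: no
      tasks:
      - name: Runs on every host in the play
        debug:
          msg: "per-host task on {{ inventory_hostname }}"
      - name: Runs once, on the first host of the batch
        debug:
          msg: "cluster-wide task, executed a single time"
        run_once: true
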
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
index 07bb16ea7..c98e7b6a5 100644
--- a/roles/contiv/tasks/netmaster_iptables.yml
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -13,9 +13,15 @@
- name: Netmaster IPtables | Open Netmaster with iptables
command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
with_items:
- - "{{ netmaster_port }}"
- "{{ contiv_rpc_port1 }}"
- "{{ contiv_rpc_port2 }}"
- "{{ contiv_rpc_port3 }}"
when: iptablesrules.stdout.find("contiv") == -1
notify: Save iptables rules
+
+- name: Netmaster IPtables | Open netmaster main port
+ command: /sbin/iptables -I INPUT 1 -p tcp -s {{ item }} --dport {{ netmaster_port }} -j ACCEPT -m comment --comment "contiv"
+ with_items:
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + netmaster_interface].ipv4.address)|list }}"
+ when: iptablesrules.stdout.find("contiv") == -1
+ notify: Save iptables rules
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 62b4716a3..a4d260279 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -27,7 +27,7 @@
- name: PkgMgr RHEL/CentOS | Install ovs
yum:
- pkg=openvswitch-2.5.0-2.el7.x86_64
+ pkg=openvswitch
state=present
environment:
http_proxy: "{{ http_proxy|default('') }}"
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 90bb98001..9b3f12567 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -6,5 +6,8 @@ After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift
ExecStart={{ bin_dir }}/aci_gw.sh start
ExecStop={{ bin_dir }}/aci_gw.sh stop
KillMode=control-group
-Restart=on-failure
+Restart=always
RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service
index a602c955e..ce7d0c75e 100644
--- a/roles/contiv/templates/netmaster.service
+++ b/roles/contiv/templates/netmaster.service
@@ -6,5 +6,8 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
EnvironmentFile=/etc/default/netmaster
ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS
KillMode=control-group
-Restart=on-failure
+Restart=always
RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service
index dc7b95bb5..6358d89ec 100644
--- a/roles/contiv/templates/netplugin.service
+++ b/roles/contiv/templates/netplugin.service
@@ -6,3 +6,8 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
EnvironmentFile=/etc/default/netplugin
ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS
KillMode=control-group
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml
index 07401a6dd..d12436f96 100644
--- a/roles/contiv_facts/tasks/rpm.yml
+++ b/roles/contiv_facts/tasks/rpm.yml
@@ -6,10 +6,17 @@
failed_when: false
check_mode: no
+- name: RPM | Determine if firewalld enabled
+ command: "systemctl status firewalld.service"
+ register: ss
+ changed_when: false
+ failed_when: false
+ check_mode: no
+
- name: Set the has_firewalld fact
set_fact:
has_firewalld: true
- when: s.rc == 0
+ when: s.rc == 0 and ss.rc == 0
- name: Determine if iptables-services installed
command: "rpm -q iptables-services"
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 3038ed9f6..86cea5c46 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -10,7 +10,7 @@ r_etcd_common_embedded_etcd: false
osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
etcd_image_dict:
- origin: "registry.fedoraproject.org/f26/etcd"
+ origin: "registry.fedoraproject.org/latest/etcd"
openshift-enterprise: "{{ osm_etcd_image }}"
etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}"
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index a90143aa3..888aa8f0c 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install clients
- package: name={{ openshift_service_type }}-clients state=present
+ package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
when: not openshift.common.is_containerized | bool
register: result
until: result | success
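
Note that openshift_pkg_version is concatenated directly onto the package name, so the value must carry its own leading dash. A sketch with a placeholder version, assuming an origin deployment where openshift_service_type is "origin":

    # group_vars sketch (version is a placeholder):
    openshift_pkg_version: "-3.7.14"
    # renders as: package: name=origin-clients-3.7.14 state=present
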
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 1d5fba990..68a0e8857 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.4.6
+XPAAS_VERSION=ose-v1.4.7
ORIGIN_VERSION=${1:-v3.7}
RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
new file mode 120000
index 000000000..8cad94b63
--- /dev/null
+++ b/roles/openshift_examples/files/examples/latest
@@ -0,0 +1 @@
+v3.9
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v3.7/v3.8 b/roles/openshift_examples/files/examples/v3.7/v3.8
deleted file mode 120000
index 8ddcf661c..000000000
--- a/roles/openshift_examples/files/examples/v3.7/v3.8
+++ /dev/null
@@ -1 +0,0 @@
-v3.8
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
index 217ef11dd..92be8f42e 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mariadb-persistent",
"annotations": {
- "openshift.io/display-name": "MariaDB (Persistent)",
+ "openshift.io/display-name": "MariaDB",
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
"tags": "database,mariadb",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
index 97e4128a4..4e3e64d48 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mongodb-persistent",
"annotations": {
- "openshift.io/display-name": "MongoDB (Persistent)",
+ "openshift.io/display-name": "MongoDB",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
index 48ac114fd..6ac80f3a0 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mysql-persistent",
"annotations": {
- "openshift.io/display-name": "MySQL (Persistent)",
+ "openshift.io/display-name": "MySQL",
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
index 8a2d23907..190509112 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "postgresql-persistent",
"annotations": {
- "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "openshift.io/display-name": "PostgreSQL",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql",
diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
index e0e0a88d5..d1103d3af 100644
--- a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "redis-persistent",
"annotations": {
- "openshift.io/display-name": "Redis (Persistent)",
+ "openshift.io/display-name": "Redis",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
"tags": "database,redis",
diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
index e7af160d9..0aa586061 100644
--- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "centos/python-35-centos7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-36-centos7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
index 2b082fc75..7ab9f4e59 100644
--- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
index 86ddc184a..40b4eaa81 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "cakephp-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "CakePHP + MySQL (Persistent)",
+ "openshift.io/display-name": "CakePHP + MySQL",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
"iconClass": "icon-php",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-persistent"
+ "template": "cakephp-mysql-persistent",
+ "app": "cakephp-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
index 3c964bd6a..ecd90e495 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-example"
+ "template": "cakephp-mysql-example",
+ "app": "cakephp-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
index 0a10c5fbc..17a155600 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dancer-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "Dancer + MySQL (Persistent)",
+ "openshift.io/display-name": "Dancer + MySQL",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
"iconClass": "icon-perl",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-persistent"
+ "template": "dancer-mysql-persistent",
+ "app": "dancer-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
index 6122d5436..abf711535 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-example"
+ "template": "dancer-mysql-example",
+ "app": "dancer-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
index f3b5838fa..c8dab0b53 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "django-psql-persistent",
"annotations": {
- "openshift.io/display-name": "Django + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Django + PostgreSQL",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
"iconClass": "icon-python",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-persistent"
+ "template": "django-psql-persistent",
+ "app": "django-psql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
index b21295df2..6395defda 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-example"
+ "template": "django-psql-example",
+ "app": "django-psql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
index 3771280bf..d7cab4889 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
"labels": {
- "template": "httpd-example"
+ "template": "httpd-example",
+ "app": "httpd-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
index 28b4b9d81..6186bba10 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json
@@ -15,6 +15,9 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-ephemeral"
+ },
"objects": [
{
"kind": "Route",
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
index 4915bb12c..59dc0d22b 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "jenkins-persistent",
"annotations": {
- "openshift.io/display-name": "Jenkins (Persistent)",
+ "openshift.io/display-name": "Jenkins",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins",
@@ -15,6 +15,9 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-persistent"
+ },
"objects": [
{
"kind": "Route",
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
index 7f2a5d804..f04adaa67 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "nodejs-mongo-persistent",
"annotations": {
- "openshift.io/display-name": "Node.js + MongoDB (Persistent)",
+ "openshift.io/display-name": "Node.js + MongoDB",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
"iconClass": "icon-nodejs",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongo-persistent"
+ "template": "nodejs-mongo-persistent",
+ "app": "nodejs-mongo-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
index b3afae46e..0ce36dba5 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongodb-example"
+ "template": "nodejs-mongodb-example",
+ "app": "nodejs-mongodb-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
index 1c03be28a..10e9382cc 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "rails-pgsql-persistent",
"annotations": {
- "openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Rails + PostgreSQL",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-pgsql-persistent"
+ "template": "rails-pgsql-persistent",
+ "app": "rails-pgsql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
index 240289d33..8ec2c8ea6 100644
--- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-postgresql-example"
+ "template": "rails-postgresql-example",
+ "app": "rails-postgresql-example"
},
"objects": [
{
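Each quickstart template above gains an `app` label next to the existing `template` label; template labels are applied to every object the template instantiates, so whole deployments become selectable with a single label query. A hedged illustration as an Ansible task (template name taken from the first hunk in this series):

    - name: Delete everything created from the cakephp-mysql-persistent template (illustrative)
      command: oc delete all -l app=cakephp-mysql-persistent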
diff --git a/roles/openshift_examples/files/examples/v3.9/latest b/roles/openshift_examples/files/examples/v3.9/latest
deleted file mode 120000
index b9bc2fdcb..000000000
--- a/roles/openshift_examples/files/examples/v3.9/latest
+++ /dev/null
@@ -1 +0,0 @@
-latest
\ No newline at end of file
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index a1c2c3956..3cf5d97c5 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -43,9 +43,9 @@ variables also control configuration behavior:
**NOTE:** Configuring a value for
`openshift_hosted_registry_storage_glusterfs_ips` with a `glusterfs_registry`
-host group is not allowed. Specifying a `glusterfs_registry` host group
-indicates that a new GlusterFS cluster should be configured, whereas
-specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting
+host group is not allowed. Specifying a `glusterfs_registry` host group
+indicates that a new GlusterFS cluster should be configured, whereas
+specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting
to use a pre-configured GlusterFS cluster for the registry storage.
_
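A minimal YAML-inventory sketch of the two mutually exclusive setups the NOTE describes (host names and IPs are placeholders):

    # Option 1: deploy a new GlusterFS cluster for the registry; define the
    # group and do NOT set openshift_hosted_registry_storage_glusterfs_ips.
    glusterfs_registry:
      hosts:
        gluster1.example.com:
        gluster2.example.com:
        gluster3.example.com:

    # Option 2: reuse a pre-configured cluster; omit the group and set instead:
    #   openshift_hosted_registry_storage_glusterfs_ips: ['192.168.10.14', '192.168.10.15', '192.168.10.16']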
@@ -53,7 +53,6 @@ _
Dependencies
------------
-* openshift_hosted_facts
* openshift_persistent_volumes
Example Playbook
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 1d70ef7eb..ac9e241a5 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -12,6 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_hosted_facts
+- role: openshift_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_hosted_facts/meta/main.yml b/roles/openshift_hosted_facts/meta/main.yml
deleted file mode 100644
index dd2de07bc..000000000
--- a/roles/openshift_hosted_facts/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Andrew Butcher
- description: OpenShift Hosted Facts
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index fcb4c94d3..59d6098d4 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -1,17 +1,20 @@
---
-- oc_obj:
- state: list
- kind: project
- name: "{{ item }}"
- with_items: "{{ __default_logging_ops_projects }}"
+- command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }}
register: __logging_ops_projects
- name: Annotate Operations Projects
oc_edit:
kind: ns
- name: "{{ item.item }}"
+ name: "{{ project }}"
separator: '#'
content:
metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}"
- with_items: "{{ __logging_ops_projects.results }}"
- when: item.results.stderr is not defined
+ with_items: "{{ __logging_ops_projects.stdout.split(' ') }}"
+ loop_control:
+ loop_var: project
+ when:
+ - __logging_ops_projects.stderr | length == 0
+ - openshift_logging_use_ops | default(false) | bool
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index ffed956a4..af36d67c6 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -107,6 +107,24 @@
- logging-fluentd
- logging-mux
+# remove annotations added by logging
+- command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ get namespaces -o name {{ __default_logging_ops_projects | join(' ') }}
+ register: __logging_ops_projects
+
+- name: Remove Annotation of Operations Projects
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ annotate {{ project }} openshift.io/logging.ui.hostname-
+ with_items: "{{ __logging_ops_projects.stdout_lines }}"
+ loop_control:
+ loop_var: project
+ when:
+ - __logging_ops_projects.stderr | length == 0
+
## EventRouter
- include_role:
name: openshift_logging_eventrouter
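The trailing `-` in `openshift.io/logging.ui.hostname-` above is the standard oc/kubectl idiom for removing an annotation rather than setting it. A self-contained sketch against a single project, following the command style used in these tasks (the namespace name is a placeholder):

    - name: Strip the logging UI annotation from one project (illustrative)
      command: >
        {{ openshift.common.client_binary }}
        --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        annotate namespace default openshift.io/logging.ui.hostname-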
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 91db457d1..9949bb95d 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -31,3 +31,4 @@
local_action: file path="{{local_tmp.stdout}}" state=absent
tags: logging_cleanup
changed_when: False
+ become: no
diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
index e92a35f27..12b4f5bfd 100644
--- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
+++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
@@ -8,3 +8,4 @@
# wait half a second between labels
- local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }}
+ become: no
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 38b2fd8b8..efd119299 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -54,6 +54,48 @@ ha_svc_template_path: "native-cluster"
openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
+openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
+
+scheduler_config:
+ kind: Policy
+ apiVersion: v1
+ predicates: "{{ openshift_master_scheduler_predicates
+ | default(openshift_master_scheduler_current_predicates
+ | default(openshift_master_scheduler_default_predicates)) }}"
+ priorities: "{{ openshift_master_scheduler_priorities
+ | default(openshift_master_scheduler_current_priorities
+ | default(openshift_master_scheduler_default_priorities)) }}"
+
+openshift_master_valid_grant_methods:
+- auto
+- prompt
+- deny
+
+openshift_master_is_scaleup_host: False
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
+
+
# NOTE
# r_openshift_master_*_default may be defined external to this role.
# openshift_use_*, if defined, may affect other roles or play behavior.
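The journald_vars_to_replace list above pairs option names with values but does not apply them itself; presumably the role writes them into the journald configuration elsewhere. A minimal sketch, assuming lineinfile against the stock /etc/systemd/journald.conf path, of how such a list could be consumed:

    - name: Apply journald settings (illustrative sketch, not the role's own task)
      lineinfile:
        dest: /etc/systemd/journald.conf
        regexp: '^(#\s*)?{{ item.var }}='
        line: '{{ item.var }}={{ item.val }}'
      with_items: "{{ journald_vars_to_replace }}"
      notify: restart journald  # hypothetical handler name, for illustration only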
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
deleted file mode 100644
index 0c681c764..000000000
--- a/roles/openshift_master/vars/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
-loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
-openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
-openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
-
-scheduler_config:
- kind: Policy
- apiVersion: v1
- predicates: "{{ openshift_master_scheduler_predicates
- | default(openshift_master_scheduler_current_predicates
- | default(openshift_master_scheduler_default_predicates)) }}"
- priorities: "{{ openshift_master_scheduler_priorities
- | default(openshift_master_scheduler_current_priorities
- | default(openshift_master_scheduler_default_priorities)) }}"
-
-openshift_master_valid_grant_methods:
-- auto
-- prompt
-- deny
-
-openshift_master_is_scaleup_host: False
-
-# These defaults assume forcing journald persistence, fsync to disk once
-# a second, rate-limiting to 10,000 logs a second, no forwarding to
-# syslog or wall, using 8GB of disk space maximum, using 10MB journal
-# files, keeping only a days worth of logs per journal file, and
-# retaining journal files no longer than a month.
-journald_vars_to_replace:
-- { var: Storage, val: persistent }
-- { var: Compress, val: yes }
-- { var: SyncIntervalSec, val: 1s }
-- { var: RateLimitInterval, val: 1s }
-- { var: RateLimitBurst, val: 10000 }
-- { var: SystemMaxUse, val: 8G }
-- { var: SystemKeepFree, val: 20% }
-- { var: SystemMaxFileSize, val: 10M }
-- { var: MaxRetentionSec, val: 1month }
-- { var: MaxFileSec, val: 1day }
-- { var: ForwardToSyslog, val: no }
-- { var: ForwardToWall, val: no }
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 170a3dc6e..1d9797f84 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -4,11 +4,15 @@
name: NetworkManager
state: restarted
enabled: True
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
- name: restart dnsmasq
systemd:
name: dnsmasq
state: restarted
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
- name: restart openvswitch
systemd:
@@ -47,3 +51,5 @@
- name: reload systemd units
command: systemctl daemon-reload
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index e5c80bd09..5d66de0a3 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,6 +2,10 @@
- name: Install the systemd units
include_tasks: systemd_units.yml
+- name: Pull container images
+ include_tasks: container_images.yml
+ when: openshift.common.is_containerized | bool
+
- name: Start and enable openvswitch service
systemd:
name: openvswitch.service
diff --git a/roles/openshift_node/tasks/container_images.yml b/roles/openshift_node/tasks/container_images.yml
new file mode 100644
index 000000000..0b8c806ae
--- /dev/null
+++ b/roles/openshift_node/tasks/container_images.yml
@@ -0,0 +1,20 @@
+---
+- name: Install Node system container
+ include_tasks: node_system_container.yml
+ when:
+ - l_is_node_system_container | bool
+
+- name: Install OpenvSwitch system containers
+ include_tasks: openvswitch_system_container.yml
+ when:
+ - openshift_node_use_openshift_sdn | bool
+ - l_is_openvswitch_system_container | bool
+
+- name: Pre-pull openvswitch image
+ command: >
+ docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when:
+ - openshift_node_use_openshift_sdn | bool
+ - not l_is_openvswitch_system_container | bool
diff --git a/roles/openshift_node/tasks/dnsmasq.yml b/roles/openshift_node/tasks/dnsmasq.yml
index f210a3a21..31ca46ec0 100644
--- a/roles/openshift_node/tasks/dnsmasq.yml
+++ b/roles/openshift_node/tasks/dnsmasq.yml
@@ -1,43 +1,4 @@
---
-- name: Check for NetworkManager service
- command: >
- systemctl show NetworkManager
- register: nm_show
- changed_when: false
- ignore_errors: True
-
-- name: Set fact using_network_manager
- set_fact:
- network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}"
-
-- name: Install dnsmasq
- package: name=dnsmasq state=installed
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-- name: ensure origin/node directory exists
- file:
- state: directory
- path: "{{ item }}"
- owner: root
- group: root
- mode: '0700'
- with_items:
- - /etc/origin
- - /etc/origin/node
-
-# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed
-# when the node stops. A dbus-message is sent to dnsmasq to add the same entries
-# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or
-# newer we can use --server-file option to update the servers dynamically and
-# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else
-# triggers a restart of dnsmasq but not a node restart.
-- name: Install node-dnsmasq.conf
- template:
- src: node-dnsmasq.conf.j2
- dest: /etc/origin/node/node-dnsmasq.conf
-
- name: Install dnsmasq configuration
template:
src: origin-dns.conf.j2
@@ -63,7 +24,3 @@
# Dynamic NetworkManager based dispatcher
- include_tasks: dnsmasq/network-manager.yml
when: network_manager_active | bool
-
-# Relies on ansible in order to configure static config
-- include_tasks: dnsmasq/no-network-manager.yml
- when: not network_manager_active | bool
diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml
new file mode 100644
index 000000000..9f66bf12d
--- /dev/null
+++ b/roles/openshift_node/tasks/dnsmasq_install.yml
@@ -0,0 +1,43 @@
+---
+- name: Check for NetworkManager service
+ command: >
+ systemctl show NetworkManager
+ register: nm_show
+ changed_when: false
+ ignore_errors: True
+
+- name: Set fact using_network_manager
+ set_fact:
+ network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}"
+
+- name: Install dnsmasq
+ package: name=dnsmasq state=installed
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+- name: ensure origin/node directory exists
+ file:
+ state: directory
+ path: "{{ item }}"
+ owner: root
+ group: root
+ mode: '0700'
+ with_items:
+ - /etc/origin
+ - /etc/origin/node
+
+# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed
+# when the node stops. A dbus-message is sent to dnsmasq to add the same entries
+# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or
+# newer we can use --server-file option to update the servers dynamically and
+# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else
+# triggers a restart of dnsmasq but not a node restart.
+- name: Install node-dnsmasq.conf
+ template:
+ src: node-dnsmasq.conf.j2
+ dest: /etc/origin/node/node-dnsmasq.conf
+
+# Relies on ansible in order to configure static config
+- include_tasks: dnsmasq/no-network-manager.yml
+ when: not network_manager_active | bool
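The comment block above describes a copy-on-start/remove-on-stop lifecycle that the node service itself performs; this play only writes the source file. A hedged illustration of the activation step, using the paths from the comment:

    - name: Activate the node dnsmasq fragment by hand (illustrative)
      copy:
        src: /etc/origin/node/node-dnsmasq.conf
        dest: /etc/dnsmasq.d/node-dnsmasq.conf
        remote_src: yes
      notify: restart dnsmasq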
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
deleted file mode 100644
index c13a6cf6c..000000000
--- a/roles/openshift_node/tasks/docker/upgrade.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# input variables:
-# - openshift_service_type
-# - openshift.common.is_containerized
-# - docker_upgrade_nuke_images
-# - docker_version
-# - skip_docker_restart
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
-
-- debug: var=docker_image_count.stdout
-
-# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition
-- name: Remove all containers and images
- script: nuke_images.sh
- register: nuke_images_result
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- service:
- name: docker
- state: stopped
- register: l_openshift_node_upgrade_docker_stop_result
- until: not l_openshift_node_upgrade_docker_stop_result | failed
- retries: 3
- delay: 30
-
-- name: Upgrade Docker
- package: name=docker{{ '-' + docker_version }} state=present
- register: result
- until: result | success
-
-# starting docker happens back in ../main.yml where it calls ../restart.yml
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 32c5f495f..946deb4d3 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -6,6 +6,7 @@
- deployment_type == 'openshift-enterprise'
- not openshift_use_crio
+- include_tasks: dnsmasq_install.yml
- include_tasks: dnsmasq.yml
- name: setup firewall
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index c532147b1..262ee698b 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -16,29 +16,10 @@
- name: include ovs service environment file
include_tasks: config/install-ovs-service-env-file.yml
- - name: Install Node system container
- include_tasks: node_system_container.yml
- when:
- - l_is_node_system_container | bool
-
- - name: Install OpenvSwitch system containers
- include_tasks: openvswitch_system_container.yml
+ - include_tasks: config/install-ovs-docker-service-file.yml
when:
- openshift_node_use_openshift_sdn | bool
- - l_is_openvswitch_system_container | bool
-
-- block:
- - name: Pre-pull openvswitch image
- command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
-
- - include_tasks: config/install-ovs-docker-service-file.yml
- when:
- - openshift.common.is_containerized | bool
- - openshift_node_use_openshift_sdn | bool
- - not l_is_openvswitch_system_container | bool
+ - not l_is_openvswitch_system_container | bool
- include_tasks: config/configure-node-settings.yml
- include_tasks: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 9f333645a..87556533a 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -10,161 +10,29 @@
# tasks file for openshift_node_upgrade
-- include_tasks: registry_auth.yml
+- name: stop services for upgrade
+ include_tasks: upgrade/stop_services.yml
-- name: Stop node and openvswitch services
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - "{{ openshift_service_type }}-node"
- - openvswitch
- failed_when: false
-
-- name: Stop additional containerized services
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - "{{ openshift_service_type }}-master-controllers"
- - "{{ openshift_service_type }}-master-api"
- - etcd_container
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Pre-pull node image
- command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
-- name: Pre-pull openvswitch image
- command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when:
- - openshift.common.is_containerized | bool
- - openshift_use_openshift_sdn | bool
-
-- include_tasks: docker/upgrade.yml
- vars:
- # We will restart Docker ourselves after everything is ready:
- skip_docker_restart: True
+# Ensure we actually install the latest pre-downloaded package.
+- name: install pre-downloaded docker upgrade rpm
+ command: "{{ ansible_pkg_mgr }} install -C -y docker{{ '-' + docker_version }}"
+ register: result
+ until: result | success
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
-- include_tasks: "{{ node_config_hook }}"
- when: node_config_hook is defined
-
-- include_tasks: upgrade/rpm_upgrade.yml
+- name: install pre-pulled rpms.
+ include_tasks: upgrade/rpm_upgrade_install.yml
vars:
- component: "node"
openshift_version: "{{ openshift_pkg_version | default('') }}"
when: not openshift.common.is_containerized | bool
-- name: Remove obsolete docker-sdn-ovs.conf
- file:
- path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
- state: absent
-
-- include_tasks: upgrade/containerized_node_upgrade.yml
- when: openshift.common.is_containerized | bool
-
-- name: Ensure containerized services stopped before Docker restart
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift_service_type }}-master-api"
- - "{{ openshift_service_type }}-master-controllers"
- - "{{ openshift_service_type }}-node"
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Stop rpm based services
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - "{{ openshift_service_type }}-node"
- - openvswitch
- failed_when: false
- when: not openshift.common.is_containerized | bool
-
-# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
-- name: Clean up dockershim data
- file:
- path: "/var/lib/dockershim/sandbox/"
- state: absent
-- name: Upgrade openvswitch
- package:
- name: openvswitch
- state: latest
- when: not openshift.common.is_containerized | bool
- register: result
- until: result | success
-
-- name: Update oreg value
- yedit:
- src: "{{ openshift.common.config_base }}/node/node-config.yaml"
- key: 'imageConfig.format'
- value: "{{ oreg_url | default(oreg_url_node) }}"
- when: oreg_url is defined or oreg_url_node is defined
-
-# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
-- name: Check for swap usage
- command: grep "^[^#].*swap" /etc/fstab
- # grep: match any lines which don't begin with '#' and contain 'swap'
- changed_when: false
- failed_when: false
- register: swap_result
-
- # Disable Swap Block
-- block:
-
- - name: Disable swap
- command: swapoff --all
-
- - name: Remove swap entries from /etc/fstab
- replace:
- dest: /etc/fstab
- regexp: '(^[^#].*swap.*)'
- replace: '# \1'
- backup: yes
-
- - name: Add notice about disabling swap
- lineinfile:
- dest: /etc/fstab
- line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
- state: present
-
- when:
- - swap_result.stdout_lines | length > 0
- - openshift_disable_swap | default(true) | bool
- # End Disable Swap Block
-
-- name: Reset selinux context
- command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
- when:
- - ansible_selinux is defined
- - ansible_selinux.status == 'enabled'
+- include_tasks: "{{ node_config_hook }}"
+ when: node_config_hook is defined
-- name: Apply 3.6 dns config changes
- yedit:
- src: /etc/origin/node/node-config.yaml
- key: "{{ item.key }}"
- value: "{{ item.value }}"
- with_items:
- - key: "dnsBindAddress"
- value: "127.0.0.1:53"
- - key: "dnsRecursiveResolvConf"
- value: "/etc/origin/node/resolv.conf"
+- include_tasks: upgrade/config_changes.yml
# Restart all services
- include_tasks: upgrade/restart.yml
@@ -181,4 +49,7 @@
retries: 24
delay: 5
+- include_tasks: dnsmasq_install.yml
- include_tasks: dnsmasq.yml
+
+- meta: flush_handlers
diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml
new file mode 100644
index 000000000..e22018e6d
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/config_changes.yml
@@ -0,0 +1,77 @@
+---
+- name: Update systemd units
+ include_tasks: ../systemd_units.yml
+ when: openshift.common.is_containerized
+
+- name: Update oreg value
+ yedit:
+ src: "{{ openshift.common.config_base }}/node/node-config.yaml"
+ key: 'imageConfig.format'
+ value: "{{ oreg_url | default(oreg_url_node) }}"
+ when: oreg_url is defined or oreg_url_node is defined
+
+- name: Remove obsolete docker-sdn-ovs.conf
+ file:
+ path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
+ state: absent
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
+- name: Clean up dockershim data
+ file:
+ path: "/var/lib/dockershim/sandbox/"
+ state: absent
+
+# Disable Swap Block (pre)
+- block:
+ - name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
+ lineinfile:
+ dest: /etc/fstab
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
+
+ - name: Disable swap
+ command: swapoff --all
+
+ when:
+ - openshift_node_upgrade_swap_result | default(False) | bool
+ - openshift_disable_swap | default(true) | bool
+# End Disable Swap Block
+
+- name: Apply 3.6 dns config changes
+ yedit:
+ src: /etc/origin/node/node-config.yaml
+ key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_items:
+ - key: "dnsBindAddress"
+ value: "127.0.0.1:53"
+ - key: "dnsRecursiveResolvConf"
+ value: "/etc/origin/node/resolv.conf"
+
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
+ src: "node.service.j2"
+ register: l_node_unit
+
+- name: Reset selinux context
+ command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
+ when:
+ - ansible_selinux is defined
+ - ansible_selinux.status == 'enabled'
+
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: l_node_unit | changed
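The NOTE above is why the task shells out to systemctl: some versions of the systemd module insist on a service name even for a bare reload. For contrast, a sketch of the module form the role deliberately avoids:

    - name: Reload systemd units (module form; not used here for the reason above)
      systemd:
        daemon_reload: yes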
diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
deleted file mode 100644
index 245de60a7..000000000
--- a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
- skip_node_svc_handlers: True
-
-- name: Update systemd units
- include_tasks: ../systemd_units.yml
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
new file mode 100644
index 000000000..71f00dcd2
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
@@ -0,0 +1,15 @@
+---
+- name: Pre-pull node image
+ command: >
+ docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+
+- name: Pre-pull openvswitch image
+ command: >
+ docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift_use_openshift_sdn | bool
+
+- include_tasks: ../container_images.yml
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 65c301783..717cfa712 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -13,6 +13,15 @@
- name: Reload systemd to ensure latest unit files
command: systemctl daemon-reload
+- name: Restart support services
+ service:
+ name: "{{ item }}"
+ state: restarted
+ enabled: True
+ with_items:
+ - NetworkManager
+ - dnsmasq
+
- name: Restart container runtime
service:
name: "{{ openshift_docker_service_name }}"
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index 120b93bc3..d2864e6b8 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -5,29 +5,20 @@
# - openshift_pkg_version
# - openshift.common.is_atomic
-# We verified latest rpm available is suitable, so just yum update.
-- name: Upgrade packages
- package: "name={{ openshift_service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+# Pre-pull new node rpm, but don't install
+- name: download new node packages
+ command: "{{ ansible_pkg_mgr }} install -y --downloadonly {{ openshift_node_upgrade_rpm_list | join(' ')}}"
register: result
until: result | success
+ vars:
+ openshift_node_upgrade_rpm_list:
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "PyYAML"
+ - "dnsmasq"
-- name: Ensure python-yaml present for config upgrade
- package: name=PyYAML state=present
- when: not openshift.common.is_atomic | bool
+# Pre-pull the rpms for openvswitch, but don't install
+# openvswitch requires the latest version to be installed.
+- name: download openvswitch upgrade rpm
+ command: "{{ ansible_pkg_mgr }} update -y --downloadonly openvswitch"
register: result
until: result | success
-
-- name: Install Node service file
- template:
- dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
- src: "node.service.j2"
- register: l_node_unit
-
-# NOTE: This is needed to make sure we are using the correct set
-# of systemd unit files. The RPMs lay down defaults but
-# the install/upgrade may override them in /etc/systemd/system/.
-# NOTE: We don't use the systemd module as some versions of the module
-# require a service to be part of the call.
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: l_node_unit | changed
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
new file mode 100644
index 000000000..6390be558
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
@@ -0,0 +1,19 @@
+---
+# input variables:
+# - openshift_service_type
+# - component
+# - openshift_pkg_version
+# - openshift.common.is_atomic
+
+# Install the pre-pulled RPM
+# Note: dnsmasq is covered in its own play. openvswitch is included here
+# because once we have the latest rpm downloaded, it will happily be installed.
+- name: install pre-pulled node packages
+ command: "{{ ansible_pkg_mgr }} install -C -y {{ openshift_node_upgrade_rpm_list | join(' ')}}"
+ register: result
+ until: result | success
+ vars:
+ openshift_node_upgrade_rpm_list:
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "PyYAML"
+ - "openvswitch"
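This file and upgrade/rpm_upgrade.yml are the two halves of one pattern: `--downloadonly` fills the yum cache during the pre-upgrade play, and `-C` later installs strictly from that cache, so the serialized upgrade window never waits on the network. A standalone sketch of the pairing (the package name is a placeholder):

    - name: Phase 1 - fetch the rpm into the local cache only
      command: "{{ ansible_pkg_mgr }} install -y --downloadonly some-package"

    - name: Phase 2 - install offline from the cache
      command: "{{ ansible_pkg_mgr }} install -C -y some-package"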
diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml
new file mode 100644
index 000000000..bbf1c5f25
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/stop_services.yml
@@ -0,0 +1,43 @@
+---
+- name: Stop node and openvswitch services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift_service_type }}-node"
+ - openvswitch
+ failed_when: false
+
+- name: Ensure containerized services stopped before Docker restart
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift_service_type }}-master-api"
+ - "{{ openshift_service_type }}-master-controllers"
+ - "{{ openshift_service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- service:
+ name: docker
+ state: stopped
+ register: l_openshift_node_upgrade_docker_stop_result
+ until: not l_openshift_node_upgrade_docker_stop_result | failed
+ retries: 3
+ delay: 30
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+
+- name: Stop rpm based services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift_service_type }}-node"
+ - openvswitch
+ failed_when: false
+ when: not openshift.common.is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml
new file mode 100644
index 000000000..3346b7c65
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade_pre.yml
@@ -0,0 +1,56 @@
+---
+# This is a hack to allow us to update various components without restarting
+# services. This will persist into the upgrade play as well, so everything
+# needs to be restarted by hand.
+- set_fact:
+ skip_node_svc_handlers: True
+
+- include_tasks: registry_auth.yml
+
+- name: update package metadata to speed up later installs
+ command: "{{ ansible_pkg_mgr }} makecache"
+ register: result
+ until: result | success
+ when: not openshift.common.is_containerized | bool
+
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+
+- debug: var=docker_image_count.stdout
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+
+- include_tasks: upgrade/containerized_upgrade_pull.yml
+ when: openshift.common.is_containerized | bool
+
+# Prepull the rpms for docker upgrade, but don't install
+- name: download docker upgrade rpm
+ command: "{{ ansible_pkg_mgr }} install -y --downloadonly docker{{ '-' + docker_version }}"
+ register: result
+ until: result | success
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+
+- include_tasks: upgrade/rpm_upgrade.yml
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+# Set this fact here so we can use it during the next play, which is serial.
+- name: set_fact swap_result
+ set_fact:
+ openshift_node_upgrade_swap_result: "{{ swap_result.stdout_lines | length > 0 | bool }}"
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 16fdde02e..261cac6f1 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -20,9 +20,9 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam
container-runtime:
- remote
container-runtime-endpoint:
- - /var/run/crio.sock
+ - /var/run/crio/crio.sock
image-service-endpoint:
- - /var/run/crio.sock
+ - /var/run/crio/crio.sock
node-labels:
- router=true
- registry=true
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index f1eca1da6..1ebeacabf 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -38,11 +38,13 @@ openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-bl
Each prometheus component (prometheus, alertmanager, alertbuffer) can set pv claim by setting corresponding role variable:
```
openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir)
+openshift_prometheus_<COMPONENT>_storage_class: <VALUE>
openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
```
e.g.
```
openshift_prometheus_storage_type: pvc
+openshift_prometheus_storage_class: glusterfs-storage
openshift_prometheus_alertmanager_pvc_name: alertmanager
openshift_prometheus_alertbuffer_pvc_size: 10G
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index df331a4bb..e30108d2c 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -23,6 +23,7 @@ openshift_prometheus_pvc_name: prometheus
openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
+openshift_prometheus_sc_name: "{{ openshift_prometheus_storage_class | default(None) }}"
# One of ['emptydir', 'pvc']
openshift_prometheus_alertmanager_storage_type: "emptydir"
@@ -30,6 +31,7 @@ openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"
+openshift_prometheus_alertmanager_sc_name: "{{ openshift_prometheus_alertmanager_storage_class | default(None) }}"
# One of ['emptydir', 'pvc']
openshift_prometheus_alertbuffer_storage_type: "emptydir"
@@ -37,6 +39,7 @@ openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}"
+openshift_prometheus_alertbuffer_sc_name: "{{ openshift_prometheus_alertbuffer_storage_class | default(None) }}"
# container resources
openshift_prometheus_cpu_limit: null
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index ad15dc65f..abc5dd476 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -131,6 +131,7 @@
access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_pvc_size }}"
selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_sc_name }}"
when: openshift_prometheus_storage_type == 'pvc'
- name: create alertmanager pvc
@@ -140,6 +141,7 @@
access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_alertmanager_sc_name }}"
when: openshift_prometheus_alertmanager_storage_type == 'pvc'
- name: create alertbuffer pvc
@@ -149,6 +151,7 @@
access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_prometheus_alertbuffer_sc_name }}"
when: openshift_prometheus_alertbuffer_storage_type == 'pvc'
# prometheus configmap
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 5e7bde1e1..3a6667863 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -37,6 +37,13 @@
- when: r_openshift_repos_has_run is not defined
block:
+ - include_tasks: rhel_repos.yml
+ when:
+ - ansible_distribution == 'RedHat'
+ - deployment_type == 'openshift-enterprise'
+ - rhsub_user is defined
+ - rhsub_password is defined
+
- include_tasks: centos_repos.yml
when:
- ansible_os_family == "RedHat"
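The rhel_repos.yml include added above fires only when all four conditions hold; the first is a gathered fact, while the rest come from the inventory. A hedged sketch of group_vars that would satisfy them (values are placeholders):

    # group_vars/OSEv3.yml (illustrative)
    deployment_type: openshift-enterprise
    rhsub_user: rhn-example-user
    rhsub_password: "{{ vault_rhsub_password }}"  # assumption: kept in an Ansible vault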
diff --git a/roles/openshift_repos/tasks/rhel_repos.yml b/roles/openshift_repos/tasks/rhel_repos.yml
new file mode 100644
index 000000000..c384cbe9a
--- /dev/null
+++ b/roles/openshift_repos/tasks/rhel_repos.yml
@@ -0,0 +1,34 @@
+---
+- name: Ensure RHEL rhui repositories are disabled
+ command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'rhui'"
+ register: repo_rhui
+ changed_when: "repo_rhui.rc != 1"
+ failed_when: repo_rhui.rc == 11
+
+- name: Disable RHEL rhui repositories
+ command: bash -c "yum-config-manager \
+    --disable 'rhui-REGION-client-config-server-7' \
+    --disable 'rhui-REGION-rhel-server-rh-common' \
+    --disable 'rhui-REGION-rhel-server-releases'"
+ when: repo_rhui.changed
+
+- name: Ensure RHEL repositories are enabled
+ command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'Red Hat' | wc -l"
+ register: repo_rhel
+ changed_when: "'4' not in repo_rhel.stdout"
+ failed_when: repo_rhel.rc == 11
+
+- name: Disable all repositories
+ command: bash -c "subscription-manager repos --disable='*'"
+ when: repo_rhel.changed
+
+- name: Enable RHEL repositories
+ command: subscription-manager repos \
+ --enable="rhel-7-server-rpms" \
+ --enable="rhel-7-server-extras-rpms" \
+ --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \
+ --enable="rhel-7-fast-datapath-rpms"
+ register: subscribe_repos
+ until: subscribe_repos | succeeded
+ when: repo_rhel.changed
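
The new rhel_repos.yml only runs under the guard added to openshift_repos/tasks/main.yaml above. A sketch of the inventory variables that activate it; the credentials are placeholders:

    deployment_type: openshift-enterprise
    openshift_release: "3.9"      # trimmed to major.minor for the rhel-7-server-ose-3.9-rpms repo id
    rhsub_user: example-user      # hypothetical subscription credentials
    rhsub_pass: example-password

The (openshift_release | default('')).split('.')[0:2] | join('.') expression reduces a release such as 3.9.14 to 3.9; unlike the removed enterprise.yml further down, it does not strip a leading 'v' from the release string.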
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index be749a2e1..f7bd58db3 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -82,6 +82,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as the default StorageClass
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
@@ -125,13 +126,14 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | Description |
-|-------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
+| Name | Default value | Description |
+|-----------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for the registry GlusterFS cluster as the default StorageClass
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
@@ -147,7 +149,6 @@ Dependencies
------------
* os_firewall
-* openshift_hosted_facts
* openshift_repos
* lib_openshift
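
A short inventory sketch for the new flag, assuming a single GlusterFS cluster whose StorageClass should become the cluster-wide default:

    openshift_storage_glusterfs_storageclass: True
    openshift_storage_glusterfs_storageclass_default: True
    # The registry cluster keeps its non-default settings (both False by default):
    # openshift_storage_glusterfs_registry_storageclass: False
    # openshift_storage_glusterfs_registry_storageclass_default: False

Only one StorageClass per cluster should carry the default annotation; Kubernetes does not reconcile competing defaults, so enabling the flag for more than one cluster is a configuration error.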
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index b7b3c0db2..da34fab2a 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -5,6 +5,7 @@ openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
+openshift_storage_glusterfs_storageclass_default: False
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
@@ -51,6 +52,7 @@ openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
openshift_storage_glusterfs_registry_storageclass: False
+openshift_storage_glusterfs_registry_storageclass_default: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
index 0cdd33880..6a4ef942b 100644
--- a/roles/openshift_storage_glusterfs/meta/main.yml
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -10,6 +10,6 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: openshift_hosted_facts
+- role: openshift_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 73b9791eb..2ea7286f3 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -7,6 +7,7 @@
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
+ glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_storageclass_default | bool }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 7466702b8..b7cff6514 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -7,6 +7,7 @@
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
+ glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_registry_storageclass_default | bool }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
index 454e84aaf..5ea17428f 100644
--- a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
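
With glusterfs_storageclass_default set, the v1.5 template above renders roughly as follows (glusterfs_name 'storage' and a native heketi route are assumed; the hostname is hypothetical and parameters after resturl are omitted):

    apiVersion: storage.k8s.io/v1beta1
    kind: StorageClass
    metadata:
      name: glusterfs-storage
      annotations:
        storageclass.kubernetes.io/is-default-class: "true"
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://heketi-storage.example.com"

The annotation value must stay the quoted string "true", since Kubernetes annotation values are strings. The same conditional block is added to the v3.6, v3.7, and v3.8 templates below.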
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
index 095fb780f..ca87807fe 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index 98f7c317e..d61e6873a 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -11,4 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: lib_os_firewall
-- role: openshift_hosted_facts
+- role: openshift_facts
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
deleted file mode 100644
index 8acdfb969..000000000
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when:
- - openshift_release is defined
- - openshift_release[0] == 'v'
-
-- name: Disable all repositories
- command: subscription-manager repos --disable="*"
-
-- name: Enable RHEL repositories
- command: subscription-manager repos \
- --enable="rhel-7-server-rpms" \
- --enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \
- --enable="rhel-7-fast-datapath-rpms"
- register: subscribe_repos
- until: subscribe_repos | succeeded
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 3466b7e44..6337cd644 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -1,31 +1,8 @@
---
-# TODO: Enhance redhat_subscription module
-# to make it able to attach to a pool
-# to make it able to enable repositories
-
- fail:
msg: "This role is only supported for Red Hat hosts"
when: ansible_distribution != 'RedHat'
-- fail:
- msg: The rhsub_user variable is required for this role.
- when: rhsub_user is not defined or not rhsub_user
-
-- fail:
- msg: The rhsub_pass variable is required for this role.
- when: rhsub_pass is not defined or not rhsub_pass
-
-- name: Detecting Atomic Host Operating System
- stat:
- path: /run/ostree-booted
- register: ostree_booted
-
-- name: Satellite preparation
- command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
- args:
- creates: /etc/rhsm/ca/katello-server-ca.pem
- when: rhsub_server is defined and rhsub_server
-
- name: Install Red Hat Subscription manager
yum:
name: subscription-manager
@@ -33,36 +10,39 @@
register: result
until: result | success
-- name: RedHat subscriptions
+- name: Is host already registered?
+ command: "subscription-manager version"
+ register: rh_subscribed
+ changed_when: False
+
+- name: Register host
redhat_subscription:
username: "{{ rhsub_user }}"
password: "{{ rhsub_pass }}"
register: rh_subscription
until: rh_subscription | succeeded
+ when:
+ - "'not registered' in rh_subscribed.stdout"
-- name: Retrieve the OpenShift Pool ID
- command: subscription-manager list --available --matches="{{ rhsub_pool }}" --pool-only
- register: openshift_pool_id
- until: openshift_pool_id | succeeded
- changed_when: False
+- fail:
+ msg: 'Unable to register host with Red Hat Subscription Manager'
+ when:
+ - "'not registered' in rh_subscribed.stdout"
+ - rh_subscription.failed
- name: Determine if OpenShift Pool Already Attached
- command: subscription-manager list --consumed --matches="{{ rhsub_pool }}" --pool-only
+ command: "subscription-manager list --consumed --pool-only --matches '*OpenShift*'"
register: openshift_pool_attached
- until: openshift_pool_attached | succeeded
changed_when: False
- when: openshift_pool_id.stdout == ''
-
-- fail:
- msg: "Unable to find pool matching {{ rhsub_pool }} in available or consumed pools"
- when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
+ ignore_errors: yes
- name: Attach to OpenShift Pool
- command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }}
- register: subscribe_pool
- until: subscribe_pool | succeeded
- when: openshift_pool_id.stdout != ''
+ command: "subscription-manager attach --pool {{ rhsub_pool }}"
+ register: openshift_pool_attached
+ changed_when: "'Successfully attached a subscription' in openshift_pool_attached.stdout"
+ when: rhsub_pool not in openshift_pool_attached.stdout
-- include_tasks: enterprise.yml
+- include_tasks: satellite.yml
when:
- - not ostree_booted.stat.exists | bool
+ - rhsub_server is defined
+ - rhsub_server
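
The reworked role is driven by a handful of inventory variables; a sketch with placeholder values:

    rhsub_user: example-user
    rhsub_pass: example-password
    rhsub_pool: 8a85f98160698fdc0160a5a337442af9    # hypothetical pool ID, passed verbatim to 'attach --pool'
    #rhsub_server: satellite.example.com            # optional; also runs the Satellite preparation in satellite.yml

Note the behavioral change: the old flow resolved rhsub_pool as a search pattern via 'list --available --matches' and attached the resulting pool ID, while the new flow passes rhsub_pool straight to 'subscription-manager attach --pool', so it must now be a pool ID rather than a name or glob.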
diff --git a/roles/rhel_subscribe/tasks/satellite.yml b/roles/rhel_subscribe/tasks/satellite.yml
new file mode 100644
index 000000000..dadbe3487
--- /dev/null
+++ b/roles/rhel_subscribe/tasks/satellite.yml
@@ -0,0 +1,5 @@
+---
+- name: Satellite preparation
+ command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ args:
+ creates: /etc/rhsm/ca/katello-server-ca.pem
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
index 421b4ecf9..c32872d24 100644
--- a/roles/template_service_broker/defaults/main.yml
+++ b/roles/template_service_broker/defaults/main.yml
@@ -3,3 +3,4 @@
template_service_broker_remove: False
template_service_broker_install: True
openshift_template_service_broker_namespaces: ['openshift']
+template_service_broker_selector: { "region": "infra" }
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 99a58baff..1253c1133 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -15,6 +15,8 @@
- oc_project:
name: openshift-template-service-broker
state: present
+ node_selector:
+ - ""
- command: mktemp -d /tmp/tsb-ansible-XXXXXX
register: mktemp
@@ -45,6 +47,7 @@
{{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
--param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
+ --param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }}
| {{ openshift.common.client_binary }} apply -f -
# reconcile with rbac
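
The new NODE_SELECTOR parameter receives the selector as quoted JSON. A sketch of overriding the region=infra default from the inventory; the label is hypothetical:

    template_service_broker_selector: { "role": "tsb" }

With to_json | quote this renders on the oc command line as --param NODE_SELECTOR='{"role": "tsb"}'. The empty node_selector added to the oc_project task above leaves the openshift-template-service-broker namespace itself unrestricted, so placement is governed only by the template parameter.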