Diffstat (limited to 'playbooks/common')
42 files changed, 1609 insertions, 247 deletions
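The dominant pattern in this changeset replaces the standalone excluder playbooks (disable_excluder.yml, reset_excluder.yml) with plays inlined at each call site that drive the openshift_excluder role through include_role with tasks_from. A condensed sketch of the new pattern, assembled from the hunks below (illustrative, not a verbatim hunk; the re-enable side is symmetric with tasks_from: enable):

---
# Inline play that replaces the old `- include: disable_excluder.yml`.
- name: Disable excluders
  hosts: oo_masters_to_config:oo_nodes_to_config
  gather_facts: no
  tasks:
  - include_role:
      name: openshift_excluder
      tasks_from: disable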
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 239bb211b..c320b80ed 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -3,9 +3,15 @@ tags: - always -- include: disable_excluder.yml +- name: Disable excluders + hosts: oo_masters_to_config:oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: disable - include: ../openshift-etcd/config.yml tags: @@ -39,6 +45,12 @@ tags: - hosted -- include: reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config:oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml deleted file mode 100644 index f664c51c9..000000000 --- a/playbooks/common/openshift-cluster/disable_excluder.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: Disable excluders - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - # During installation the excluders are installed with present state. - # So no pre-validation check here as the excluders are either to be installed (present = latest) - # or they are not going to be updated if already installed - - # disable excluders based on their status - - include_role: - name: openshift_excluder - tasks_from: disable - vars: - openshift_excluder_package_state: present - docker_excluder_package_state: present diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 6aac70f63..46932b27f 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -5,33 +5,40 @@ become: no gather_facts: no tasks: - - fail: + - name: Evaluate groups - g_etcd_hosts required + fail: msg: This playbook requires g_etcd_hosts to be set - when: "{{ g_etcd_hosts is not defined }}" + when: g_etcd_hosts is not defined - - fail: + - name: Evaluate groups - g_master_hosts or g_new_master_hosts required + fail: msg: This playbook requires g_master_hosts or g_new_master_hosts to be set - when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}" + when: g_master_hosts is not defined or g_new_master_hosts is not defined - - fail: + - name: Evaluate groups - g_node_hosts or g_new_node_hosts required + fail: msg: This playbook requires g_node_hosts or g_new_node_hosts to be set - when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}" + when: g_node_hosts is not defined or g_new_node_hosts is not defined - - fail: + - name: Evaluate groups - g_lb_hosts required + fail: msg: This playbook requires g_lb_hosts to be set - when: "{{ g_lb_hosts is not defined }}" + when: g_lb_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts required + fail: msg: This playbook requires g_nfs_hosts to be set - when: "{{ g_nfs_hosts is not defined }}" + when: g_nfs_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts is single host + fail: msg: The nfs group must be limited to one host - when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}" + when: (groups[g_nfs_hosts] | default([])) | length > 1 - - fail: + - name: Evaluate groups - g_glusterfs_hosts required + fail: msg: This playbook requires 
g_glusterfs_hosts to be set - when: "{{ g_glusterfs_hosts is not defined }}" + when: g_glusterfs_hosts is not defined - name: Evaluate oo_all_hosts add_host: @@ -51,13 +58,13 @@ with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}" changed_when: no - - name: Evaluate oo_etcd_to_config + - name: Evaluate oo_first_master add_host: - name: "{{ item }}" - groups: oo_etcd_to_config + name: "{{ g_master_hosts[0] }}" + groups: oo_first_master ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_etcd_hosts | default([]) }}" + when: g_master_hosts|length > 0 changed_when: no - name: Evaluate oo_masters_to_config @@ -69,41 +76,59 @@ with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}" changed_when: no - - name: Evaluate oo_nodes_to_config + - name: Evaluate oo_etcd_to_config add_host: name: "{{ item }}" - groups: oo_nodes_to_config + groups: oo_etcd_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" + with_items: "{{ g_etcd_hosts | default([]) }}" changed_when: no - # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is - - name: Add master to oo_nodes_to_config + - name: Evaluate oo_first_etcd add_host: - name: "{{ item }}" - groups: oo_nodes_to_config + name: "{{ g_etcd_hosts[0] }}" + groups: oo_first_etcd ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_master_hosts | default([]) }}" - when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}" + when: g_etcd_hosts|length > 0 changed_when: no - - name: Evaluate oo_first_etcd + # We use two groups: one for hosts we're upgrading, which doesn't include embedded etcd; + # the other for backing up, which includes the embedded etcd host. There's no need to + # upgrade embedded etcd; that just happens when the master is updated.
+ - name: Evaluate oo_etcd_hosts_to_upgrade add_host: - name: "{{ g_etcd_hosts[0] }}" - groups: oo_first_etcd + name: "{{ item }}" + groups: oo_etcd_hosts_to_upgrade + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" + changed_when: False + + - name: Evaluate oo_etcd_hosts_to_backup + add_host: + name: "{{ item }}" + groups: oo_etcd_hosts_to_backup + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" + changed_when: False + + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - when: "{{ g_etcd_hosts|length > 0 }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" changed_when: no - - name: Evaluate oo_first_master + # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is set + - name: Add master to oo_nodes_to_config add_host: - name: "{{ g_master_hosts[0] }}" - groups: oo_first_master + name: "{{ item }}" + groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - when: "{{ g_master_hosts|length > 0 }}" + with_items: "{{ g_master_hosts | default([]) }}" + when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool changed_when: no - name: Evaluate oo_lb_to_config @@ -130,5 +155,5 @@ groups: oo_glusterfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_glusterfs_hosts | default([]) }}" + with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}" changed_when: no diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 07b38920f..f4e52869e 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,13 +1,14 @@ --- # NOTE: requires openshift_facts be run - name: Verify compatible yum/subscription-manager combination - hosts: l_oo_all_hosts + hosts: oo_all_hosts gather_facts: no tasks: # See: # https://bugzilla.redhat.com/show_bug.cgi?id=1395047 # https://bugzilla.redhat.com/show_bug.cgi?id=1282961 # https://github.com/openshift/openshift-ansible/issues/1138 + # Consider the repoquery module for this work - name: Check for bad combinations of yum and subscription-manager command: > {{ repoquery_cmd }} --installed --qf '%{version}' "yum" @@ -16,7 +17,7 @@ when: not openshift.common.is_atomic | bool - fail: msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. - when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout" + when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.'
in yum_ver_test.stdout - name: Determine openshift_version to configure on first master hosts: oo_first_master diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml deleted file mode 100644 index eaa8ce39c..000000000 --- a/playbooks/common/openshift-cluster/reset_excluder.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - include_role: - name: openshift_excluder - tasks_from: enable diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml deleted file mode 100644 index a30952929..000000000 --- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Record excluder state and disable - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - include: pre/validate_excluder.yml - vars: - excluder: "{{ openshift.common.service_type }}-docker-excluder" - when: enable_docker_excluder | default(enable_excluders) | default(True) | bool - - include: pre/validate_excluder.yml - vars: - excluder: "{{ openshift.common.service_type }}-excluder" - when: enable_openshift_excluder | default(enable_excluders) | default(True) | bool - - - # disable excluders based on their status - - include_role: - name: openshift_excluder - tasks_from: disable - vars: - openshift_excluder_package_state: latest - docker_excluder_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml new file mode 100644 index 000000000..35da3b6c3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml @@ -0,0 +1,14 @@ +--- +- name: Disable excluders + hosts: oo_masters_to_config + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: verify_upgrade + - include_role: + name: openshift_excluder + tasks_from: disable + vars: + openshift_excluder_package_state: latest + docker_excluder_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml new file mode 100644 index 000000000..847c22085 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml @@ -0,0 +1,14 @@ +--- +- name: Disable excluders + hosts: oo_nodes_to_config + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: verify_upgrade + - include_role: + name: openshift_excluder + tasks_from: disable + vars: + openshift_excluder_package_state: latest + docker_excluder_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index fb51a0061..9d0333ca8 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -1,6 +1,6 @@ --- - name: Backup etcd - hosts: etcd_hosts_to_backup + hosts: oo_etcd_hosts_to_backup vars: embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec 
etcd etcdctl' }}" @@ -87,10 +87,10 @@ tasks: - set_fact: etcd_backup_completed: "{{ hostvars - | oo_select_keys(groups.etcd_hosts_to_backup) + | oo_select_keys(groups.oo_etcd_hosts_to_backup) | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" - set_fact: - etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - fail: msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh deleted file mode 120000 index 641e04e44..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh +++ /dev/null @@ -1 +0,0 @@ -../roles/etcd/files/etcdctl.sh
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index fa86d29fb..d9b59edcb 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -5,32 +5,6 @@ # mirrored packages on your own because only the GA and latest versions are # available in the repos. So for Fedora we'll simply skip this, sorry. -- include: ../../evaluate_groups.yml - tags: - - always - -# We use two groups one for hosts we're upgrading which doesn't include embedded etcd -# The other for backing up which includes the embedded etcd host, there's no need to -# upgrade embedded etcd that just happens when the master is updated. -- name: Evaluate additional groups for etcd - hosts: localhost - connection: local - become: no - tasks: - - name: Evaluate etcd_hosts_to_upgrade - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_upgrade - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" - changed_when: False - - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False - - name: Backup etcd before upgrading anything include: backup.yml vars: @@ -38,9 +12,11 @@ when: openshift_etcd_backup | default(true) | bool - name: Drop etcdctl profiles - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - - include: roles/etcd/tasks/etcdctl.yml + - include_role: + name: etcd_common + tasks_from: etcdctl.yml - name: Perform etcd upgrade include: ./upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index a9b5b94e6..45e301315 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -1,6 +1,6 @@ --- - name: Determine etcd version - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - name: Record RPM based etcd version command: rpm -qa --qf '%{version}' etcd\* @@ -43,7 +43,7 @@ # I really dislike this copy/pasta but I wasn't able to find a way to get it to loop # through hosts, then loop through tasks only when appropriate - name: Upgrade to 2.1 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: '2.1' @@ -52,7 +52,7 @@ when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool - name: Upgrade RPM hosts to 2.2 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: '2.2' @@ -61,7 +61,7 @@ when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool - name: Upgrade containerized hosts to 2.2.5 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: 2.2.5 @@ -70,7 +70,7 @@ when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool - name: Upgrade RPM hosts to 2.3 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: 
upgrade_version: '2.3' @@ -79,7 +79,7 @@ when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool - name: Upgrade containerized hosts to 2.3.7 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: 2.3.7 @@ -88,7 +88,7 @@ when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool - name: Upgrade RPM hosts to 3.0 - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: '3.0' @@ -97,7 +97,7 @@ when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool - name: Upgrade containerized hosts to etcd3 image - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 vars: upgrade_version: 3.0.15 @@ -106,7 +106,7 @@ when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool - name: Upgrade fedora to latest - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 tasks: - include: fedora_tasks.yml diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index cbf6d58b3..0f421928b 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -10,17 +10,6 @@ - include: ../initialize_facts.yml -- name: Ensure clean repo cache in the event repos have been changed manually - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - name: Clean package cache - command: "{{ ansible_pkg_mgr }} clean all" - when: not openshift.common.is_atomic | bool - args: - warn: no - - name: Ensure firewall is not switched during upgrade hosts: oo_all_hosts tasks: diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py index 673f11889..4eac8b067 100755 --- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py +++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py @@ -1,7 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - """Ansible module for modifying OpenShift configs during an upgrade""" import os diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 0d7cdb227..fff199f42 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -97,6 +97,12 @@ - not grep_plugin_order_override | skipped - grep_plugin_order_override.rc == 0 -- include: ../reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml deleted file mode 100644 index 6de1ed061..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# input variables: -# - repoquery_cmd -# - excluder -# - 
openshift_upgrade_target -- block: - - name: Get available excluder version - command: > - {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}" - register: excluder_version - failed_when: false - changed_when: false - - - name: Docker excluder version detected - debug: - msg: "{{ excluder }}: {{ excluder_version.stdout }}" - - - name: Printing upgrade target version - debug: - msg: "{{ openshift_upgrade_target }}" - - - name: Check the available {{ excluder }} version is at most of the upgrade target version - fail: - msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version" - when: - - "{{ excluder_version.stdout != '' }}" - - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}" - when: - - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index c83923dae..6a9f88707 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -1,21 +1,13 @@ --- - name: Verify upgrade targets hosts: oo_masters_to_config:oo_nodes_to_upgrade - vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - pre_tasks: - - fail: + + tasks: + - name: Fail when OpenShift is not installed + fail: msg: Verify OpenShift is already installed when: openshift.common.version is not defined - - fail: - msg: Verify the correct version was found - when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version - - - set_fact: - g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - when: not openshift.common.is_containerized | bool - - name: Verify containers are available for upgrade command: > docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} @@ -23,19 +15,31 @@ changed_when: "'Downloaded newer image' in pull_result.stdout" when: openshift.common.is_containerized | bool - - name: Check latest available OpenShift RPM version - command: > - {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}" - failed_when: false - changed_when: false - register: avail_openshift_version - when: not openshift.common.is_containerized | bool + - when: not openshift.common.is_containerized | bool + block: + - name: Check latest available OpenShift RPM version + command: > + {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}" + failed_when: false + changed_when: false + register: avail_openshift_version - - name: Verify OpenShift RPMs are available for upgrade - fail: - msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required" - when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<') + - name: Fail when unable to determine available OpenShift RPM version + fail: + msg: "Unable to determine available OpenShift RPM version" + when: + - avail_openshift_version.stdout == '' - - fail: + - name: Verify OpenShift RPMs are available for upgrade + fail: + msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} 
or greater is required" + when: + - not avail_openshift_version | skipped + - avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<') + + - name: Fail when openshift version does not meet minium requirement for Origin upgrade + fail: msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" - when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<') + when: + - deployment_type == 'origin' + - openshift.common.version | version_compare(openshift_upgrade_min,'<') diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index 03ac02e9f..164baca81 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -1,27 +1,39 @@ --- -# We verified latest rpm available is suitable, so just yum update. +# When we update package "a-${version}" and a requires b >= ${version} if we +# don't specify the version of b yum will choose the latest version of b +# available and the whole set of dependencies end up at the latest version. +# Since the package module, unlike the yum module, doesn't flatten a list +# of packages into one transaction we need to do that explicitly. The ansible +# core team tells us not to rely on yum module transaction flattening anyway. + +# TODO: If the sdn package isn't already installed this will install it, we +# should fix that -# Master package upgrade ends up depending on node and sdn packages, we need to be explicit -# with all versions to avoid yum from accidentally jumping to something newer than intended: - name: Upgrade master packages - package: name={{ item }} state=present - when: component == "master" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + package: name={{ master_pkgs | join(',') }} state=present + vars: + master_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "master" + - not openshift.common.is_atomic | bool - name: Upgrade node packages - package: name={{ item }} state=present - when: component == "node" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - -- name: Ensure python-yaml present for config upgrade - package: name=PyYAML state=present - when: not openshift.common.is_atomic | bool + package: name={{ node_pkgs | join(',') }} state=present + vars: + node_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + 
- "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "node" + - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index c6e799261..0ad934d2d 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -2,17 +2,6 @@ ############################################################################### # Upgrade Masters ############################################################################### -- name: Evaluate additional groups for upgrade - hosts: localhost - connection: local - become: no - tasks: - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. This causes problems for the etcd data dir fact detection diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index e9f894942..2a5ac0aef 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -47,6 +47,12 @@ until: node_schedulable|succeeded when: node_unschedulable|changed -- include: ../reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml index 88f2ddc78..83d2cec81 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml @@ -63,12 +63,12 @@ - block: - debug: msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}" + when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] - debug: msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}" + when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates + when: openshift_master_scheduler_predicates | default(none) is not none # Handle cases where openshift_master_predicates is not defined - block: 
@@ -87,7 +87,7 @@ when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}" - when: "{{ openshift_master_scheduler_predicates | default(none) is none }}" + when: openshift_master_scheduler_predicates | default(none) is none # Upgrade priorities @@ -120,12 +120,12 @@ - block: - debug: msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}" + when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] - debug: msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}" + when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities + when: openshift_master_scheduler_priorities | default(none) is not none # Handle cases where openshift_master_priorities is not defined - block: @@ -144,7 +144,7 @@ when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}" - when: "{{ openshift_master_scheduler_priorities | default(none) is none }}" + when: openshift_master_scheduler_priorities | default(none) is none # Update scheduler diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml index 68c71a132..d69472fad 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml @@ -53,7 +53,7 @@ dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'admissionConfig.pluginConfig' yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "{{ 'admission_plugin_config' in openshift.master }}" + when: "'admission_plugin_config' in openshift.master" - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml new file mode 100644 index 000000000..d81a13ef2 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -0,0 +1,111 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml new file mode 100644 index 000000000..8a692d02b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -0,0 +1,111 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml new file mode 100644 index 000000000..2d30bba94 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -0,0 +1,106 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml index 43c2ffcd4..ed89dbe8d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml @@ -3,7 +3,7 @@ dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'admissionConfig.pluginConfig' yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "{{ 'admission_plugin_config' in openshift.master }}" + when: "'admission_plugin_config' in openshift.master" - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml new file mode 100644 index 000000000..e9ff47f32 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -0,0 +1,109 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml new file mode 100644 index 000000000..d4ae8d8b4 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -0,0 +1,111 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
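Every pre-flight include above carries the pre_upgrade tag, so the checks compose into a standalone dry run before any upgrade play executes. A minimal sketch of the gating pattern these playbooks rely on (file names here are placeholders, not files from this repository):

---
# Tags on an include are inherited by the included tasks, so
#   ansible-playbook upgrade.yml --tags pre_upgrade
# executes only the tagged checks and skips the untagged upgrade plays.
- include: pre/sample_check.yml
  tags:
  - pre_upgrade

- include: perform_upgrade.yml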
+ +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml new file mode 100644 index 000000000..ae205b172 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -0,0 +1,104 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
+ when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml new file mode 100644 index 000000000..1269634d1 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -0,0 +1,113 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
new file mode 100644
index 000000000..1269634d1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -0,0 +1,113 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+  tags:
+  - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                  | union(groups['oo_masters_to_config'])
+                                                  | union(groups['oo_etcd_to_config'] | default([])))
+                                              | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                              }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: validator.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  tasks:
+  - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
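Each stream derives its upgrade bounds from deployment_type with a Jinja2 inline conditional. A self-contained illustration of how that expression evaluates (a standalone throwaway playbook, not part of the tree):

    ---
    - hosts: localhost
      gather_facts: no
      vars:
        deployment_type: origin
      tasks:
      - set_fact:
          openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
      - debug:
          # prints "upgrade target: 1.5"; any other deployment_type yields 3.5
          msg: "upgrade target: {{ openshift_upgrade_target }}"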
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
new file mode 100644
index 000000000..21c075678
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -0,0 +1,115 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+  tags:
+  - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos on control plane hosts
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                  | union(groups['oo_masters_to_config'])
+                                                  | union(groups['oo_etcd_to_config'] | default([])))
+                                              | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                              }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: validator.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
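oo_select_keys and oo_collect in the no-proxy plays are filter plugins shipped in this repository, not stock Ansible. For readers without those plugins, a rough equivalent of the chain using only built-in filters (an approximation for illustration, not a drop-in replacement):

    - set_fact:
        openshift_no_proxy_internal_hostnames: >-
          {{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config'])
             | union(groups['oo_etcd_to_config'] | default([]))
             | map('extract', hostvars, ['openshift', 'common', 'hostname'])
             | list | join(',') }}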
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
new file mode 100644
index 000000000..e67e169fc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+  tags:
+  - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos on nodes
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_repos
+  tags:
+  - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+                                                  | union(groups['oo_masters_to_config'])
+                                                  | union(groups['oo_etcd_to_config'] | default([])))
+                                              | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                              }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- name: Verify masters are already upgraded
+  hosts: oo_masters_to_config
+  tags:
+  - pre_upgrade
+  tasks:
+  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+    when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_nodes_to_upgrade
+  tasks:
+  - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
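The upgrade itself happens in the shared ../upgrade_nodes.yml, which this diff only references. The "serialized upgrade process" the cleanup comments mention is, in outline, a play shaped like the sketch below; openshift_upgrade_nodes_serial and the oadm commands mirror conventions used elsewhere in the repository and are illustrative here, not quoted from the shared file:

    # Sketch: unschedule, upgrade, and reschedule nodes a few at a time.
    - name: Upgrade nodes
      hosts: oo_nodes_to_upgrade
      serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
      tasks:
      - name: Mark node unschedulable
        command: oadm manage-node {{ openshift.node.nodename }} --schedulable=false
        delegate_to: "{{ groups.oo_first_master.0 }}"
      # ... node package/image upgrade and service restart tasks ...
      - name: Mark node schedulable again
        command: oadm manage-node {{ openshift.node.nodename }} --schedulable=true
        delegate_to: "{{ groups.oo_first_master.0 }}"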
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..a1b1f3301
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,113 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+  tags:
+  - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: '3.6'
+      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                  | union(groups['oo_masters_to_config'])
+                                                  | union(groups['oo_etcd_to_config'] | default([])))
+                                              | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                              }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: validator.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  tasks:
+  - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
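The only substantive difference from the v3_4/v3_5 variants is in the target facts: starting with 3.6, Origin adopts OpenShift Container Platform's version numbering, so the target no longer branches on deployment_type and only the minimum keeps the 1.x/3.x split:

    - set_fact:
        openshift_upgrade_target: '3.6'  # same value for origin and enterprise
        openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"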
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..af6e1f71b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,115 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+  tags:
+  - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: '3.6'
+      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos on control plane hosts
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                  | union(groups['oo_masters_to_config'])
+                                                  | union(groups['oo_etcd_to_config'] | default([])))
+                                              | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                              }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: validator.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
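As the header comment spells out, the control-plane playbook deliberately leaves node components untouched; running it and then upgrade_nodes.yml reproduces what the combined upgrade.yml does, and the master-version gate in upgrade_nodes.yml aborts if the halves are run out of order. A hypothetical wrapper staging the two halves (the byo/ entry points normally fill this role):

    ---
    - include: upgrade_control_plane.yml
    - include: upgrade_nodes.yml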
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..285c18b7b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+  tags:
+  - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: '3.6'
+      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos on nodes
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_repos
+  tags:
+  - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+                                                  | union(groups['oo_masters_to_config'])
+                                                  | union(groups['oo_etcd_to_config'] | default([])))
+                                              | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                              }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+              openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- name: Verify masters are already upgraded
+  hosts: oo_masters_to_config
+  tags:
+  - pre_upgrade
+  tasks:
+  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+    when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_nodes_to_upgrade
+  tasks:
+  - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 75faf5ba8..1efdfb336 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -12,7 +12,9 @@
     - service: glusterfs_bricks
       port: "49152-49251/tcp"
   roles:
-  - os_firewall
+  - role: os_firewall
+    when:
+    - openshift_storage_glusterfs_is_native | default(True)
 
 - name: Configure GlusterFS
   hosts: oo_first_master
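The os_firewall change relies on the fact that only the dict form of a roles entry accepts per-role keywords such as when. The two spellings, side by side for contrast:

    roles:
    # bare form: role is always applied
    - os_firewall
    # dict form: role runs only when the condition holds
    - role: os_firewall
      when:
      - openshift_storage_glusterfs_is_native | default(True)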
}}" + when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" + roles: + - role: openshift_hosted diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 92f16dc47..49594d294 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -51,7 +51,7 @@ changed_when: false - name: Configure docker hosts - hosts: oo_masters_to-config:oo_nodes_to_config + hosts: oo_masters_to_config:oo_nodes_to_config vars: docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" @@ -60,9 +60,15 @@ - openshift_facts - openshift_docker -- include: ../openshift-cluster/disable_excluder.yml +- name: Disable excluders + hosts: oo_masters_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: disable - include: ../openshift-master/config.yml @@ -70,6 +76,12 @@ - include: ../openshift-node/config.yml -- include: ../openshift-cluster/reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml index be050c12c..0014a5dbd 100644 --- a/playbooks/common/openshift-node/network_manager.yml +++ b/playbooks/common/openshift-node/network_manager.yml @@ -1,6 +1,6 @@ --- - name: Install and configure NetworkManager - hosts: l_oo_all_hosts + hosts: oo_all_hosts become: yes tasks: - name: install NetworkManager diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml index c31aca62b..d94df553c 100644 --- a/playbooks/common/openshift-node/scaleup.yml +++ b/playbooks/common/openshift-node/scaleup.yml @@ -27,12 +27,24 @@ - openshift_facts - openshift_docker -- include: ../openshift-cluster/disable_excluder.yml +- name: Disable excluders + hosts: oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: disable - include: ../openshift-node/config.yml -- include: ../openshift-cluster/reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable |