Diffstat (limited to 'playbooks/common')
89 files changed, 2659 insertions, 842 deletions
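A pattern repeated throughout this diff is the retirement of the standalone disable_excluder.yml/reset_excluder.yml playbooks in favor of invoking the openshift_excluder role directly, parameterized by r_openshift_excluder_* variables. A minimal sketch of the new calling convention, using the variable names exactly as they appear in the config.yml hunk below (the surrounding plays are illustrative, not part of the diff):

---
# Disable the excluders before installing, then re-enable them afterwards.
# r_openshift_excluder_action selects which tasks the role runs.
- name: Disable excluders
  hosts: oo_masters_to_config:oo_nodes_to_config
  gather_facts: no
  roles:
    - role: openshift_excluder
      r_openshift_excluder_action: disable
      r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"

# ... installation plays run here ...

- name: Re-enable excluder if it was previously enabled
  hosts: oo_masters_to_config:oo_nodes_to_config
  gather_facts: no
  roles:
    - role: openshift_excluder
      r_openshift_excluder_action: enable
      r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"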
diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml new file mode 100644 index 000000000..533a35d9e --- /dev/null +++ b/playbooks/common/openshift-cfme/config.yml @@ -0,0 +1,44 @@ +--- +# TODO: Make this work. The 'name' variable below is undefined +# presently because it's part of the cfme role. This play can't run +# until that's re-worked. +# +# - name: Pre-Pull manageiq-pods docker images +# hosts: nodes +# tasks: +# - name: Ensure the latest manageiq-pods docker image is pulling +# docker_image: +# name: "{{ openshift_cfme_container_image }}" +# # Fire-and-forget method, never timeout +# async: 99999999999 +# # F-a-f, never check on this. True 'background' task. +# poll: 0 + +- name: Configure Masters for CFME Bulk Image Imports + hosts: oo_masters_to_config + serial: 1 + tasks: + - name: Run master cfme tuning playbook + include_role: + name: openshift_cfme + tasks_from: tune_masters + +- name: Setup CFME + hosts: oo_first_master + vars: + r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}" + pre_tasks: + - name: Create a temporary place to evaluate the PV templates + command: mktemp -d /tmp/openshift-ansible-XXXXXXX + register: r_openshift_cfme_mktemp + changed_when: false + - name: Ensure the server template was read from disk + debug: + msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}" + + tasks: + - name: Run the CFME Setup Role + include_role: + name: openshift_cfme + vars: + template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}" diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-cfme/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library new file mode 120000 index 000000000..ba40d2f56 --- /dev/null +++ b/playbooks/common/openshift-cfme/library @@ -0,0 +1 @@ +../../../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-cfme/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/common/openshift-cfme/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..78b8e7668 --- /dev/null +++ b/playbooks/common/openshift-cfme/uninstall.yml @@ -0,0 +1,8 @@ +--- +- name: Uninstall CFME + hosts: masters + tasks: + - name: Run the CFME Uninstall Role Tasks + include_role: + name: openshift_cfme + tasks_from: uninstall diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml new file mode 100644 index 000000000..c7766ff04 --- /dev/null +++ b/playbooks/common/openshift-checks/health.yml @@ -0,0 +1,16 @@ +--- +# openshift_health_checker depends on openshift_version which now requires group eval. +- include: ../openshift-cluster/evaluate_groups.yml + tags: + - always + +- name: Run OpenShift health checks + hosts: OSEv3 + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: "health" + post_tasks: + - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513 + args: + checks: ['@health'] diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml new file mode 100644 index 000000000..7ca9f7e8b --- /dev/null +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -0,0 +1,16 @@ +--- +# openshift_health_checker depends on openshift_version which now requires group eval. +- include: ../openshift-cluster/evaluate_groups.yml + tags: + - always + +- hosts: OSEv3 + name: run OpenShift pre-install checks + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: "pre-install" + post_tasks: + - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513 + args: + checks: ['@preflight'] diff --git a/playbooks/common/openshift-checks/roles b/playbooks/common/openshift-checks/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/common/openshift-checks/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 1b967b7f1..7224ae712 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,34 +1,36 @@ --- -- name: Set oo_option facts +# TODO: refactor this into its own include +# and pass a variable for ctx +- name: Verify Requirements hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: "install" + post_tasks: + - action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - package_availability + - package_version + - docker_image_availability + - docker_storage + +- include: initialize_oo_option_facts.yml tags: - always - tasks: - - set_fact: - openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" - when: openshift_docker_additional_registries is not defined - - set_fact: - openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}" - when: openshift_docker_insecure_registries is not defined - - set_fact: - openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" - when: openshift_docker_blocked_registries is not defined - - set_fact: - openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" - when: openshift_docker_options is not defined - - set_fact: - openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" - when: openshift_docker_log_driver is not defined - - set_fact: - openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" - when: openshift_docker_log_options is not defined - - set_fact: - openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" - when: openshift_docker_selinux_enabled is not defined - -- include: disable_excluder.yml + +- name: Disable excluders + hosts: oo_masters_to_config:oo_nodes_to_config tags: - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - include: ../openshift-etcd/config.yml tags: @@ -54,10 +56,26 @@ tags: - node +- include: ../openshift-glusterfs/config.yml + tags: + - glusterfs + - include: openshift_hosted.yml tags: - hosted -- include: reset_excluder.yml +- include: service_catalog.yml + when: + - openshift_enable_service_catalog | default(false) | bool + tags: + - servicecatalog + +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config:oo_nodes_to_config tags: - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml deleted file mode 100644 index f664c51c9..000000000 --- a/playbooks/common/openshift-cluster/disable_excluder.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: Disable excluders - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - # During installation the excluders are installed with present state. 
- # So no pre-validation check here as the excluders are either to be installed (present = latest) - # or they are not going to be updated if already installed - - # disable excluders based on their status - - include_role: - name: openshift_excluder - tasks_from: disable - vars: - openshift_excluder_package_state: present - docker_excluder_package_state: present diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 45a4875a3..baca72c58 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -5,29 +5,40 @@ become: no gather_facts: no tasks: - - fail: + - name: Evaluate groups - g_etcd_hosts required + fail: msg: This playbook requires g_etcd_hosts to be set - when: "{{ g_etcd_hosts is not defined }}" + when: g_etcd_hosts is not defined - - fail: + - name: Evaluate groups - g_master_hosts or g_new_master_hosts required + fail: msg: This playbook requires g_master_hosts or g_new_master_hosts to be set - when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}" + when: g_master_hosts is not defined or g_new_master_hosts is not defined - - fail: + - name: Evaluate groups - g_node_hosts or g_new_node_hosts required + fail: msg: This playbook requires g_node_hosts or g_new_node_hosts to be set - when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}" + when: g_node_hosts is not defined or g_new_node_hosts is not defined - - fail: + - name: Evaluate groups - g_lb_hosts required + fail: msg: This playbook requires g_lb_hosts to be set - when: "{{ g_lb_hosts is not defined }}" + when: g_lb_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts required + fail: msg: This playbook requires g_nfs_hosts to be set - when: "{{ g_nfs_hosts is not defined }}" + when: g_nfs_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts is single host + fail: msg: The nfs group must be limited to one host - when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}" + when: (groups[g_nfs_hosts] | default([])) | length > 1 + + - name: Evaluate groups - g_glusterfs_hosts required + fail: + msg: This playbook requires g_glusterfs_hosts to be set + when: g_glusterfs_hosts is not defined - name: Evaluate oo_all_hosts add_host: @@ -47,13 +58,13 @@ with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}" changed_when: no - - name: Evaluate oo_etcd_to_config + - name: Evaluate oo_first_master add_host: - name: "{{ item }}" - groups: oo_etcd_to_config + name: "{{ g_master_hosts[0] }}" + groups: oo_first_master ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_etcd_hosts | default([]) }}" + when: g_master_hosts|length > 0 changed_when: no - name: Evaluate oo_masters_to_config @@ -65,6 +76,41 @@ with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}" changed_when: no + - name: Evaluate oo_etcd_to_config + add_host: + name: "{{ item }}" + groups: oo_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_etcd_hosts | default([]) }}" + changed_when: no + + - name: Evaluate oo_first_etcd + add_host: + name: "{{ g_etcd_hosts[0] }}" + groups: oo_first_etcd + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + when: g_etcd_hosts|length > 0 + 
changed_when: no + + # We use two groups one for hosts we're upgrading which doesn't include embedded etcd + # The other for backing up which includes the embedded etcd host, there's no need to + # upgrade embedded etcd that just happens when the master is updated. + - name: Evaluate oo_etcd_hosts_to_upgrade + add_host: + name: "{{ item }}" + groups: oo_etcd_hosts_to_upgrade + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" + changed_when: False + + - name: Evaluate oo_etcd_hosts_to_backup + add_host: + name: "{{ item }}" + groups: oo_etcd_hosts_to_backup + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" + changed_when: False + - name: Evaluate oo_nodes_to_config add_host: name: "{{ item }}" @@ -82,40 +128,41 @@ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_master_hosts | default([]) }}" - when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}" + when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool changed_when: no - - name: Evaluate oo_first_etcd + - name: Evaluate oo_lb_to_config add_host: - name: "{{ g_etcd_hosts[0] }}" - groups: oo_first_etcd + name: "{{ item }}" + groups: oo_lb_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - when: "{{ g_etcd_hosts|length > 0 }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_lb_hosts | default([]) }}" changed_when: no - - name: Evaluate oo_first_master + - name: Evaluate oo_nfs_to_config add_host: - name: "{{ g_master_hosts[0] }}" - groups: oo_first_master + name: "{{ item }}" + groups: oo_nfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - when: "{{ g_master_hosts|length > 0 }}" + with_items: "{{ g_nfs_hosts | default([]) }}" changed_when: no - - name: Evaluate oo_lb_to_config + - name: Evaluate oo_glusterfs_to_config add_host: name: "{{ item }}" - groups: oo_lb_to_config + groups: oo_glusterfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_lb_hosts | default([]) }}" + with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}" changed_when: no - - name: Evaluate oo_nfs_to_config + - name: Evaluate oo_etcd_to_migrate add_host: name: "{{ item }}" - groups: oo_nfs_to_config + groups: oo_etcd_to_migrate ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_nfs_hosts | default([]) }}" + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}" changed_when: no diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml new file mode 100644 index 000000000..ac3c702a0 --- /dev/null +++ b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml @@ -0,0 +1,27 @@ +--- +- name: Set oo_option facts + hosts: oo_all_hosts + tags: + - always + tasks: + - set_fact: + openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" + when: openshift_docker_additional_registries is not defined + - set_fact: + openshift_docker_insecure_registries: "{{ 
lookup('oo_option', 'docker_insecure_registries') }}" + when: openshift_docker_insecure_registries is not defined + - set_fact: + openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" + when: openshift_docker_blocked_registries is not defined + - set_fact: + openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" + when: openshift_docker_options is not defined + - set_fact: + openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" + when: openshift_docker_log_driver is not defined + - set_fact: + openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" + when: openshift_docker_log_options is not defined + - set_fact: + openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" + when: openshift_docker_selinux_enabled is not defined diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 07b38920f..f4e52869e 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,13 +1,14 @@ --- # NOTE: requires openshift_facts be run - name: Verify compatible yum/subscription-manager combination - hosts: l_oo_all_hosts + hosts: oo_all_hosts gather_facts: no tasks: # See: # https://bugzilla.redhat.com/show_bug.cgi?id=1395047 # https://bugzilla.redhat.com/show_bug.cgi?id=1282961 # https://github.com/openshift/openshift-ansible/issues/1138 + # Consider the repoquery module for this work - name: Check for bad combinations of yum and subscription-manager command: > {{ repoquery_cmd }} --installed --qf '%{version}' "yum" @@ -16,7 +17,7 @@ when: not openshift.common.is_atomic | bool - fail: msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. - when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout" + when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' 
in yum_ver_test.stdout - name: Determine openshift_version to configure on first master hosts: oo_first_master diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 5db71b857..8d94b6509 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -45,6 +45,8 @@ - role: cockpit-ui when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) + - role: openshift_default_storage_class + when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') - name: Update master-config for publicLoggingURL hosts: oo_masters_to_config:!oo_first_master diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml index d96a78c4c..57580406c 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/common/openshift-cluster/openshift_logging.yml @@ -1,11 +1,13 @@ --- +- include: evaluate_groups.yml + - name: OpenShift Aggregated Logging hosts: oo_first_master roles: - openshift_logging - name: Update Master configs - hosts: masters:!oo_first_master + hosts: oo_masters:!oo_first_master tasks: - block: - include_role: diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml index 9f38ceea6..bcff4a1a1 100644 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -1,4 +1,6 @@ --- +- include: evaluate_groups.yml + - name: OpenShift Metrics hosts: oo_first_master roles: diff --git a/playbooks/common/openshift-cluster/openshift_provisioners.yml b/playbooks/common/openshift-cluster/openshift_provisioners.yml new file mode 100644 index 000000000..b1ca6f606 --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_provisioners.yml @@ -0,0 +1,5 @@ +--- +- name: OpenShift Provisioners + hosts: oo_first_master + roles: + - openshift_provisioners diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml new file mode 100644 index 000000000..6964e8567 --- /dev/null +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml @@ -0,0 +1,158 @@ +--- +- name: Check cert expirys + hosts: oo_etcd_to_config:oo_masters_to_config + vars: + openshift_certificate_expiry_show_all: yes + roles: + # Sets 'check_results' per host which contains health status for + # etcd, master and node certificates. We will use 'check_results' + # to determine if any certificates were expired prior to running + # this playbook. Service restarts will be skipped if any + # certificates were previously expired. 
+ - role: openshift_certificate_expiry + +- name: Backup existing etcd CA certificate directories + hosts: oo_etcd_to_config + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Determine if CA certificate directory exists + stat: + path: "{{ etcd_ca_dir }}" + register: etcd_ca_certs_dir_stat + - name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_ca_dir }} + args: + warn: no + when: etcd_ca_certs_dir_stat.stat.exists | bool + - name: Remove CA certificate directory + file: + path: "{{ etcd_ca_dir }}" + state: absent + when: etcd_ca_certs_dir_stat.stat.exists | bool + +- name: Generate new etcd CA + hosts: oo_first_etcd + roles: + - role: openshift_etcd_ca + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + +- name: Create temp directory for syncing certs + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: g_etcd_mktemp + changed_when: false + +- name: Distribute etcd CA to etcd hosts + hosts: oo_etcd_to_config + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Create a tarball of the etcd ca certs + command: > + tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz + -C {{ etcd_ca_dir }} . + args: + creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + warn: no + delegate_to: "{{ etcd_ca_host }}" + run_once: true + - name: Retrieve etcd ca cert tarball + fetch: + src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + delegate_to: "{{ etcd_ca_host }}" + run_once: true + - name: Ensure ca directory exists + file: + path: "{{ etcd_ca_dir }}" + state: directory + - name: Unarchive etcd ca cert tarballs + unarchive: + src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_ca_dir }}" + - name: Read current etcd CA + slurp: + src: "{{ etcd_conf_dir }}/ca.crt" + register: g_current_etcd_ca_output + - name: Read new etcd CA + slurp: + src: "{{ etcd_ca_dir }}/ca.crt" + register: g_new_etcd_ca_output + - copy: + content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" + dest: "{{ item }}/ca.crt" + with_items: + - "{{ etcd_conf_dir }}" + - "{{ etcd_ca_dir }}" + +- include: ../../openshift-etcd/restart.yml + # Do not restart etcd when etcd certificates were previously expired. 
+ when: ('expired' not in (hostvars + | oo_select_keys(groups['etcd']) + | oo_collect('check_results.check_results.etcd') + | oo_collect('health'))) + +- name: Retrieve etcd CA certificate + hosts: oo_first_etcd + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Retrieve etcd CA certificate + fetch: + src: "{{ etcd_conf_dir }}/ca.crt" + dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + +- name: Distribute etcd CA to masters + hosts: oo_masters_to_config + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + tasks: + - name: Deploy etcd CA + copy: + src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" + dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" + when: groups.oo_etcd_to_config | default([]) | length > 0 + +- name: Delete temporary directory on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: + name: "{{ g_etcd_mktemp.stdout }}" + state: absent + changed_when: false + +- include: ../../openshift-master/restart.yml + # Do not restart masters when master certificates were previously expired. + when: ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml index 2963a5940..6b5c805e6 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml @@ -3,7 +3,8 @@ hosts: oo_first_etcd any_errors_fatal: true roles: - - etcd_common + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" post_tasks: - name: Determine if generated etcd certificates exist stat: @@ -27,7 +28,8 @@ hosts: oo_etcd_to_config any_errors_fatal: true roles: - - etcd_common + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" post_tasks: - name: Backup etcd certificates command: > @@ -50,6 +52,7 @@ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" openshift_ca_host: "{{ groups.oo_first_master.0 }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - name: Redeploy etcd client certificates for masters hosts: oo_masters_to_config @@ -63,4 +66,5 @@ etcd_cert_prefix: "master.etcd-" openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml index c30889d64..51b196299 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml 
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml @@ -51,3 +51,13 @@ | oo_collect('openshift.common.hostname') | default(none, true) }}" openshift_certificates_redeploy: true + - role: lib_utils + post_tasks: + - yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: servingInfo.namedCertificates + value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}" + when: + - ('named_certificates' in openshift.master) + - openshift.master.named_certificates | default([]) | length > 0 + - openshift_master_overwrite_named_certificates | default(false) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index cbb4a2434..089ae6bbc 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -6,131 +6,17 @@ msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles." when: not openshift.common.version_gte_3_2_or_1_2 | bool -- name: Backup existing etcd CA certificate directories - hosts: oo_etcd_to_config - roles: - - etcd_common - tasks: - - name: Determine if CA certificate directory exists - stat: - path: "{{ etcd_ca_dir }}" - register: etcd_ca_certs_dir_stat - - name: Backup generated etcd certificates - command: > - tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz - {{ etcd_ca_dir }} - args: - warn: no - when: etcd_ca_certs_dir_stat.stat.exists | bool - - name: Remove CA certificate directory - file: - path: "{{ etcd_ca_dir }}" - state: absent - when: etcd_ca_certs_dir_stat.stat.exists | bool - -- name: Generate new etcd CA - hosts: oo_first_etcd - roles: - - role: openshift_etcd_ca - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - -- name: Create temp directory for syncing certs - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - register: g_etcd_mktemp - changed_when: false - -- name: Distribute etcd CA to etcd hosts - hosts: oo_etcd_to_config +- name: Check cert expirys + hosts: oo_nodes_to_config:oo_masters_to_config vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + openshift_certificate_expiry_show_all: yes roles: - - etcd_common - tasks: - - name: Create a tarball of the etcd ca certs - command: > - tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz - -C {{ etcd_ca_dir }} . 
- args: - creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" - warn: no - delegate_to: "{{ etcd_ca_host }}" - run_once: true - - name: Retrieve etcd ca cert tarball - fetch: - src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" - dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" - flat: yes - fail_on_missing: yes - validate_checksum: yes - delegate_to: "{{ etcd_ca_host }}" - run_once: true - - name: Ensure ca directory exists - file: - path: "{{ etcd_ca_dir }}" - state: directory - - name: Unarchive etcd ca cert tarballs - unarchive: - src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" - dest: "{{ etcd_ca_dir }}" - - name: Read current etcd CA - slurp: - src: "{{ etcd_conf_dir }}/ca.crt" - register: g_current_etcd_ca_output - - name: Read new etcd CA - slurp: - src: "{{ etcd_ca_dir }}/ca.crt" - register: g_new_etcd_ca_output - - copy: - content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" - dest: "{{ item }}/ca.crt" - with_items: - - "{{ etcd_conf_dir }}" - - "{{ etcd_ca_dir }}" - -- name: Retrieve etcd CA certificate - hosts: oo_first_etcd - roles: - - etcd_common - tasks: - - name: Retrieve etcd CA certificate - fetch: - src: "{{ etcd_conf_dir }}/ca.crt" - dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" - flat: yes - fail_on_missing: yes - validate_checksum: yes - -- name: Distribute etcd CA to masters - hosts: oo_masters_to_config - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - tasks: - - name: Deploy CA certificate, key, bundle and serial - copy: - src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" - dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" - when: groups.oo_etcd_to_config | default([]) | length > 0 - -- name: Delete temporary directory on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - file: - name: "{{ g_etcd_mktemp.stdout }}" - state: absent - changed_when: false - -- include: ../../../common/openshift-etcd/restart.yml + # Sets 'check_results' per host which contains health status for + # etcd, master and node certificates. We will use 'check_results' + # to determine if any certificates were expired prior to running + # this playbook. Service restarts will be skipped if any + # certificates were previously expired. + - role: openshift_certificate_expiry # Update master config when ca-bundle not referenced. Services will be # restarted below after new CA certificate has been distributed. @@ -322,7 +208,17 @@ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" with_items: "{{ client_users }}" -- include: ../../../common/openshift-master/restart.yml +- include: ../../openshift-master/restart.yml + # Do not restart masters when master certificates were previously expired. 
+ when: ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) - name: Distribute OpenShift CA certificate to nodes hosts: oo_nodes_to_config @@ -371,4 +267,14 @@ state: absent changed_when: false -- include: ../../../common/openshift-node/restart.yml +- include: ../../openshift-node/restart.yml + # Do not restart nodes when node certificates were previously expired. + when: ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"})) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml index e82996cf4..8c8062585 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml @@ -66,7 +66,7 @@ --signer-cert={{ openshift.common.config_base }}/master/ca.crt --signer-key={{ openshift.common.config_base }}/master/ca.key --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt - --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" + --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" --cert={{ openshift.common.config_base }}/master/registry.crt --key={{ openshift.common.config_base }}/master/registry.key {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %} diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml index a7b614341..9f14f2d69 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml @@ -67,7 +67,66 @@ service.alpha.openshift.io/serving-cert-secret-name=router-certs --config={{ mktemp.stdout }}/admin.kubeconfig -n default - when: l_router_dc.rc == 0 and 'router-certs' in router_secrets + when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined + + - block: + - assert: + that: + - "'certfile' in openshift_hosted_router_certificate" + - "'keyfile' in openshift_hosted_router_certificate" + - "'cafile' in openshift_hosted_router_certificate" + msg: |- + openshift_hosted_router_certificate has been set in the inventory but is + missing one or more required keys. 
Ensure that 'certfile', 'keyfile', + and 'cafile' keys have been specified for the openshift_hosted_router_certificate + inventory variable. + + - name: Read router certificate and key + become: no + local_action: + module: slurp + src: "{{ item }}" + register: openshift_router_certificate_output + # Defaulting dictionary keys to none to avoid deprecation warnings + # (future fatal errors) during template evaluation. Dictionary keys + # won't be accessed unless openshift_hosted_router_certificate is + # defined and has all keys (certfile, keyfile, cafile) which we + # check above. + with_items: + - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}" + - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}" + - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}" + + - name: Write temporary router certificate file + copy: + content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}" + dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" + mode: 0600 + + - name: Write temporary router key file + copy: + content: "{{ (openshift_router_certificate_output.results + | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}" + dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" + mode: 0600 + + - name: Replace router-certs secret + shell: > + {{ openshift.common.client_binary }} secrets new router-certs + tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" + tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" + --type=kubernetes.io/tls + --confirm + -o json | {{ openshift.common.client_binary }} replace -f - + + - name: Remove temporary router certificate and key files + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" + - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" + when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined - name: Redeploy router command: > diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml deleted file mode 100644 index eaa8ce39c..000000000 --- a/playbooks/common/openshift-cluster/reset_excluder.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - include_role: - name: openshift_excluder - tasks_from: enable diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml new file mode 100644 index 000000000..c42e8781a --- /dev/null +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -0,0 +1,8 @@ +--- +- include: evaluate_groups.yml + +- name: Service Catalog + hosts: oo_first_master + roles: + - openshift_service_catalog + - ansible_service_broker diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 74cc1d527..6ed31a644 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -1,28 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - 
include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: no - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - include: evaluate_groups.yml tags: - always diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml deleted file mode 100644 index d1e431c5e..000000000 --- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Record excluder state and disable - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - include: pre/validate_excluder.yml - vars: - #repoquery_cmd: repoquery_cmd - #openshift_upgrade_target: openshift_upgrade_target - excluder: "{{ item }}" - with_items: - - "{{ openshift.common.service_type }}-docker-excluder" - - "{{ openshift.common.service_type }}-excluder" - - # disable excluders based on their status - - include_role: - name: openshift_excluder - tasks_from: disable - vars: - openshift_excluder_package_state: latest - docker_excluder_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml new file mode 100644 index 000000000..800621857 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml @@ -0,0 +1,12 @@ +--- +- name: Disable excluders + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + r_openshift_excluder_verify_upgrade: true + r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}" + r_openshift_excluder_package_state: latest + r_openshift_excluder_docker_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml new file mode 100644 index 000000000..a66301c0d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml @@ -0,0 +1,12 @@ +--- +- name: Disable excluders + hosts: oo_nodes_to_upgrade:!oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + r_openshift_excluder_verify_upgrade: true + r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}" + r_openshift_excluder_package_state: latest + r_openshift_excluder_docker_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml new file mode 100644 index 000000000..07db071ce --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -0,0 +1,71 @@ +--- +- include: ../../evaluate_groups.yml + vars: + # Do not allow adding hosts during upgrade. 
+ g_new_master_hosts: [] + g_new_node_hosts: [] + openshift_cluster_id: "{{ cluster_id | default('default') }}" + +- include: ../initialize_nodes_to_upgrade.yml + +- name: Check for appropriate Docker versions + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + roles: + - openshift_facts + tasks: + - set_fact: + repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" + + - fail: + msg: Cannot upgrade Docker on Atomic operating systems. + when: openshift.common.is_atomic | bool + + - include: upgrade_check.yml + when: docker_upgrade is not defined or docker_upgrade | bool + + +# If a node fails, halt everything, the admin will need to clean up and we +# don't want to carry on, potentially taking out every node. The playbook can safely be re-run +# and will not take any action on a node already running the requested docker version. +- name: Drain and upgrade nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + serial: 1 + any_errors_fatal: true + + roles: + - lib_openshift + + tasks: + - name: Mark node unschedulable + oc_adm_manage_node: + node: "{{ openshift.node.nodename | lower }}" + schedulable: False + delegate_to: "{{ groups.oo_first_master.0 }}" + retries: 10 + delay: 5 + register: node_unschedulable + until: node_unschedulable|succeeded + when: + - l_docker_upgrade is defined + - l_docker_upgrade | bool + - inventory_hostname in groups.oo_nodes_to_upgrade + + - name: Drain Node for Kubelet upgrade + command: > + {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets + delegate_to: "{{ groups.oo_first_master.0 }}" + when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade + + - include: upgrade.yml + when: l_docker_upgrade is defined and l_docker_upgrade | bool + + - name: Set node schedulability + oc_adm_manage_node: + node: "{{ openshift.node.nodename | lower }}" + schedulable: True + delegate_to: "{{ groups.oo_first_master.0 }}" + retries: 10 + delay: 5 + register: node_schedulable + until: node_schedulable|succeeded + when: node_unschedulable|changed diff --git a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh index 8635eab0d..8635eab0d 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh +++ b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh diff --git a/playbooks/common/openshift-cluster/upgrades/docker/roles b/playbooks/common/openshift-cluster/upgrades/docker/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/docker/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index 7ef79afa9..616ba04f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -1,88 +1,14 @@ --- - name: Backup etcd - hosts: etcd_hosts_to_backup - vars: - embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}" + hosts: oo_etcd_hosts_to_backup roles: - - openshift_facts - tasks: - # Ensure we persist the etcd role for this host in openshift_facts - - openshift_facts: - role: etcd - local_facts: {} - when: "'etcd' not in openshift" - - - stat: path=/var/lib/openshift - register: var_lib_openshift - - - stat: path=/var/lib/origin - register: var_lib_origin - - - name: Create origin symlink if necessary - file: src=/var/lib/openshift/ dest=/var/lib/origin state=link - when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False - - # TODO: replace shell module with command and update later checks - # We assume to be using the data dir for all backups. - - name: Check available disk space for etcd backup - shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 - register: avail_disk - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - # TODO: replace shell module with command and update later checks - - name: Check current embedded etcd disk usage - shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 - register: etcd_disk_usage - when: embedded_etcd | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - - name: Abort if insufficient disk space for etcd backup - fail: - msg: > - {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, - {{ avail_disk.stdout }} Kb available. - when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) - - # For non containerized and non embedded we should have the correct version of - # etcd installed already. So don't do anything. - # - # For embedded or containerized we need to use the latest because OCP 3.3 uses - # a version of etcd that can only be backed up with etcd-3.x and if it's - # containerized then etcd version may be newer than that on the host so - # upgrade it. - # - # On atomic we have neither yum nor dnf so ansible throws a hard to debug error - # if you use package there, like this: "Could not find a module for unknown." - # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668 - # - # TODO - We should refactor all containerized backups to use the containerized - # version of etcd to perform the backup rather than relying on the host's - # binaries. 
Until we do that we'll continue to have problems backing up etcd - # when atomic host has an older version than the version that's running in the - # container whether that's embedded or not - - name: Install latest etcd for containerized or embedded - package: - name: etcd - state: latest - when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic - - - name: Generate etcd backup - command: > - {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }} - --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }} - - - set_fact: - etcd_backup_complete: True - - - name: Display location of etcd backup - debug: - msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}" + - role: openshift_facts + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_backup_tag: etcd_backup_tag + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - name: Gate on etcd backup hosts: localhost @@ -91,10 +17,10 @@ tasks: - set_fact: etcd_backup_completed: "{{ hostvars - | oo_select_keys(groups.etcd_hosts_to_backup) - | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" + | oo_select_keys(groups.oo_etcd_hosts_to_backup) + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" - set_fact: - etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - fail: msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml deleted file mode 100644 index 5f8b59e17..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: Verify cluster is healthy pre-upgrade - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - -- name: Get current image - shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}' - register: current_image - -- name: Set new_etcd_image - set_fact: - new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}" - -- name: Pull new etcd image - command: "docker pull {{ new_etcd_image }}" - -- name: Update to latest etcd image - replace: - dest: /etc/systemd/system/etcd_container.service - regexp: "{{ current_image.stdout }}$" - replace: "{{ new_etcd_image }}" - -- name: Restart etcd_container - systemd: - name: etcd_container - daemon_reload: yes - state: restarted - -## TODO: probably should just move this into the backup playbooks, also this -## will fail on atomic host. We need to revisit how to do etcd backups there as -## the container may be newer than etcdctl on the host. 
Assumes etcd3 obsoletes etcd (7.3.1) -- name: Upgrade etcd for etcdctl when not atomic - package: name=etcd state=latest - when: not openshift.common.is_atomic | bool - -- name: Verify cluster is healthy - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - register: etcdctl - until: etcdctl.rc == 0 - retries: 3 - delay: 10 - -- name: Store new etcd_image - openshift_facts: - role: etcd - local_facts: - etcd_image: "{{ new_etcd_image }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml deleted file mode 100644 index 30232110e..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# F23 GA'd with etcd 2.0, currently has 2.2 in updates -# F24 GA'd with etcd-2.2, currently has 2.2 in updates -# F25 Beta currently has etcd 3.0 -- name: Verify cluster is healthy pre-upgrade - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - -- name: Update etcd - package: - name: "etcd" - state: "latest" - -- name: Restart etcd - service: - name: etcd - state: restarted - -- name: Verify cluster is healthy - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - register: etcdctl - until: etcdctl.rc == 0 - retries: 3 - delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh deleted file mode 120000 index 641e04e44..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh +++ /dev/null @@ -1 +0,0 @@ -../roles/etcd/files/etcdctl.sh
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index fa86d29fb..64abc54e7 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -5,42 +5,19 @@ # mirrored packages on your own because only the GA and latest versions are # available in the repos. So for Fedora we'll simply skip this, sorry. -- include: ../../evaluate_groups.yml - tags: - - always - -# We use two groups one for hosts we're upgrading which doesn't include embedded etcd -# The other for backing up which includes the embedded etcd host, there's no need to -# upgrade embedded etcd that just happens when the master is updated. -- name: Evaluate additional groups for etcd - hosts: localhost - connection: local - become: no - tasks: - - name: Evaluate etcd_hosts_to_upgrade - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_upgrade - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" - changed_when: False - - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False - - name: Backup etcd before upgrading anything include: backup.yml vars: - backup_tag: "pre-upgrade-" + etcd_backup_tag: "pre-upgrade-" when: openshift_etcd_backup | default(true) | bool - name: Drop etcdctl profiles - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - - include: roles/etcd/tasks/etcdctl.yml + - include_role: + name: etcd_common + vars: + r_etcd_common_action: drop_etcdctl - name: Perform etcd upgrade include: ./upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml deleted file mode 100644 index 3a972e8ab..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Verify cluster is healthy pre-upgrade - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - -- name: Update etcd RPM - package: - name: etcd-{{ upgrade_version }}* - state: latest - -- name: Restart etcd - service: - name: etcd - state: restarted - -- name: Verify cluster is healthy - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - register: etcdctl - until: etcdctl.rc == 0 - retries: 3 - delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index a9b5b94e6..39e82498d 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -1,119 +1,110 @@ --- - name: Determine etcd version - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - - name: Record RPM based etcd version - command: rpm -qa --qf '%{version}' etcd\* - args: - warn: no - register: etcd_rpm_version - failed_when: false - when: not openshift.common.is_containerized | bool - # AUDIT:changed_when: `false` because we are 
only inspecting - # state, not manipulating anything - changed_when: false - - - name: Record containerized etcd version - command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version - failed_when: false - when: openshift.common.is_containerized | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - - name: Record containerized etcd version - command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version - failed_when: false - when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - - name: Record containerized etcd version - command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version - failed_when: false - when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - -# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop -# through hosts, then loop through tasks only when appropriate -- name: Upgrade to 2.1 - hosts: etcd_hosts_to_upgrade - serial: 1 + - block: + - name: Record RPM based etcd version + command: rpm -qa --qf '%{version}' etcd\* + args: + warn: no + register: etcd_rpm_version + failed_when: false + # AUDIT:changed_when: `false` because we are only inspecting + # state, not manipulating anything + changed_when: false + - debug: + msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected" + when: + - not openshift.common.is_containerized | bool + + - block: + - name: Record containerized etcd version (docker) + command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* + register: etcd_container_version_docker + failed_when: false + # AUDIT:changed_when: `false` because we are only inspecting + # state, not manipulating anything + changed_when: false + when: + - not openshift.common.is_etcd_system_container | bool + + # A registered variable is set even if the when condition + # is false, so we need to set etcd_container_version separately + - set_fact: + etcd_container_version: "{{ etcd_container_version_docker.stdout }}" + when: + - not openshift.common.is_etcd_system_container | bool + + - name: Record containerized etcd version (runc) + command: runc exec etcd rpm -qa --qf '%{version}' etcd\* + register: etcd_container_version_runc + failed_when: false + # AUDIT:changed_when: `false` because we are only inspecting + # state, not manipulating anything + changed_when: false + when: + - openshift.common.is_etcd_system_container | bool + + # A registered variable is set even if the when condition + # is false, so we need to set etcd_container_version separately + - set_fact: + etcd_container_version: "{{ etcd_container_version_runc.stdout }}" + when: + - openshift.common.is_etcd_system_container | bool + + - debug: + msg: "Etcd containerized version {{ etcd_container_version }} detected" + when: + - openshift.common.is_containerized | bool + +- include: upgrade_rpm_members.yml vars: - upgrade_version: '2.1' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized
| bool + etcd_upgrade_version: '2.1' -- name: Upgrade RPM hosts to 2.2 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_rpm_members.yml vars: - upgrade_version: '2.2' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + etcd_upgrade_version: '2.2' -- name: Upgrade containerized hosts to 2.2.5 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_image_members.yml vars: - upgrade_version: 2.2.5 - tasks: - - include: containerized_tasks.yml - when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool + etcd_upgrade_version: '2.2.5' -- name: Upgrade RPM hosts to 2.3 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_rpm_members.yml vars: - upgrade_version: '2.3' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + etcd_upgrade_version: '2.3' -- name: Upgrade containerized hosts to 2.3.7 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_image_members.yml vars: - upgrade_version: 2.3.7 - tasks: - - include: containerized_tasks.yml - when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool + etcd_upgrade_version: '2.3.7' -- name: Upgrade RPM hosts to 3.0 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_rpm_members.yml vars: - upgrade_version: '3.0' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + etcd_upgrade_version: '3.0' -- name: Upgrade containerized hosts to etcd3 image - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_image_members.yml vars: - upgrade_version: 3.0.15 - tasks: - - include: containerized_tasks.yml - when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool + etcd_upgrade_version: '3.0.15' + +- include: upgrade_rpm_members.yml + vars: + etcd_upgrade_version: '3.1' + +- include: upgrade_image_members.yml + vars: + etcd_upgrade_version: '3.1.3' - name: Upgrade fedora to latest - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 tasks: - - include: fedora_tasks.yml - when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool + - include_role: + name: etcd_upgrade + when: + - ansible_distribution == 'Fedora' + - not openshift.common.is_containerized | bool - name: Backup etcd include: backup.yml vars: - backup_tag: "post-3.0-" + etcd_backup_tag: "post-3.0-" when: openshift_etcd_backup | default(true) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml new file mode 100644 index 000000000..831ca8f57 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml @@ -0,0 +1,17 @@ +--- +# INPUT etcd_upgrade_version +# INPUT etcd_container_version +# INPUT openshift.common.is_containerized +- name: Upgrade containerized hosts to {{ etcd_upgrade_version }} + hosts: oo_etcd_hosts_to_upgrade + serial: 1 + roles: + - role: etcd_upgrade + r_etcd_upgrade_action: upgrade + 
r_etcd_upgrade_mechanism: image + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + etcd_peer: "{{ openshift.common.hostname }}" + when: + - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<') + - openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml new file mode 100644 index 000000000..2e79451e0 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml @@ -0,0 +1,18 @@ +--- +# INPUT etcd_upgrade_version +# INPUT etcd_rpm_version +# INPUT openshift.common.is_containerized +- name: Upgrade to {{ etcd_upgrade_version }} + hosts: oo_etcd_hosts_to_upgrade + serial: 1 + roles: + - role: etcd_upgrade + r_etcd_upgrade_action: upgrade + r_etcd_upgrade_mechanism: rpm + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "host" + etcd_peer: "{{ openshift.common.hostname }}" + when: + - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<') + - ansible_distribution == 'RedHat' + - not openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index bcbc4ee02..0f421928b 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -1,28 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml - - include: ../evaluate_groups.yml vars: # Do not allow adding hosts during upgrade. 
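# Each upgrade_rpm_members.yml / upgrade_image_members.yml pass above is a
# no-op for members already at or past its target; the version_compare test
# is the gate. A minimal sketch of that gate in isolation, assuming
# etcd_rpm_version was registered by the detection play (target illustrative;
# default('99') skips members where no version was detected):
- hosts: oo_etcd_hosts_to_upgrade
  serial: 1
  tasks:
    - name: Upgrade only members still behind the target
      debug:
        msg: "this member would be upgraded to 3.1"
      when:
        - etcd_rpm_version.stdout | default('99') | version_compare('3.1', '<')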
@@ -30,48 +6,17 @@ g_new_node_hosts: [] openshift_cluster_id: "{{ cluster_id | default('default') }}" -- name: Set oo_options - hosts: oo_all_hosts - tasks: - - set_fact: - openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" - when: openshift_docker_additional_registries is not defined - - set_fact: - openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}" - when: openshift_docker_insecure_registries is not defined - - set_fact: - openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" - when: openshift_docker_blocked_registries is not defined - - set_fact: - openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" - when: openshift_docker_options is not defined - - set_fact: - openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" - when: openshift_docker_log_driver is not defined - - set_fact: - openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" - when: openshift_docker_log_options is not defined +- include: ../initialize_oo_option_facts.yml - include: ../initialize_facts.yml -- name: Ensure clean repo cache in the event repos have been changed manually - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - name: Clean package cache - command: "{{ ansible_pkg_mgr }} clean all" - when: not openshift.common.is_atomic | bool - args: - warn: no - - name: Ensure firewall is not switched during upgrade hosts: oo_all_hosts tasks: - name: Check if iptables is running command: systemctl status iptables - ignore_errors: true changed_when: false + failed_when: false register: service_iptables_status - name: Set fact os_firewall_use_firewalld FALSE for iptables diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index 046535680..72de63070 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -6,27 +6,32 @@ - lib_openshift tasks: - - name: Retrieve list of openshift nodes matching upgrade label - oc_obj: - state: list - kind: node - selector: "{{ openshift_upgrade_nodes_label }}" - register: nodes_to_upgrade - when: openshift_upgrade_nodes_label is defined + - when: openshift_upgrade_nodes_label is defined + block: + - name: Retrieve list of openshift nodes matching upgrade label + oc_obj: + state: list + kind: node + selector: "{{ openshift_upgrade_nodes_label }}" + register: nodes_to_upgrade - # We got a list of nodes with the label, now we need to match these with inventory hosts - # using their openshift.common.hostname fact. 
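# The hunk below hoists the shared openshift_upgrade_nodes_label guard onto a
# block so each task no longer repeats it. The idiom in isolation (task bodies
# illustrative):
- when: openshift_upgrade_nodes_label is defined
  block:
    - name: First guarded task
      debug:
        msg: "runs only when the label variable is set"
    - name: Second guarded task
      debug:
        msg: "shares the same guard without repeating it"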
- - name: Map labelled nodes to inventory hosts - add_host: - name: "{{ item }}" - groups: temp_nodes_to_upgrade - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: " {{ groups['oo_nodes_to_config'] }}" - when: - - openshift_upgrade_nodes_label is defined - - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list - changed_when: false + - name: Fail if no nodes match openshift_upgrade_nodes_label + fail: + msg: "openshift_upgrade_nodes_label was specified but no nodes matched" + when: nodes_to_upgrade.results.results[0]['items'] | length == 0 + + # We got a list of nodes with the label, now we need to match these with inventory hosts + # using their openshift.common.hostname fact. + - name: Map labelled nodes to inventory hosts + add_host: + name: "{{ item }}" + groups: temp_nodes_to_upgrade + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: " {{ groups['oo_nodes_to_config'] }}" + when: + - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list + changed_when: false # Build up the oo_nodes_to_upgrade group, use the list filtered by label if # present, otherwise hit all nodes: diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py index 673f11889..4eac8b067 100755 --- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py +++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py @@ -1,7 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - """Ansible module for modifying OpenShift configs during an upgrade""" import os diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index c00795a8d..d9ddf3860 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -5,9 +5,12 @@ - name: Upgrade default router and default registry hosts: oo_first_master vars: - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}" - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}" - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | + replace ( '${version}', openshift_image_tag ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | + replace ( '${version}', openshift_image_tag ) }}" + registry_console_image: "{{ openshift.master.registry_url | regex_replace ( '(origin|ose)-\\${component}', 'registry-console') | + replace ( '${version}', 'v' ~ openshift.common.short_version ) }}" pre_tasks: - name: Load lib_openshift modules @@ -21,7 +24,10 @@ selector: 'router' register: all_routers - - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | 
oo_select_keys_from_list(['metadata']) }}" + - set_fact: + haproxy_routers: "{{ all_routers.results.results[0]['items'] | + oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | + oo_select_keys_from_list(['metadata']) }}" when: - all_routers.results.returncode == 0 @@ -30,16 +36,15 @@ - all_routers.results.returncode != 0 - name: Update router image to current version + oc_edit: + kind: dc + name: "{{ item['labels']['deploymentconfig'] }}" + namespace: "{{ item['namespace'] }}" + content: + spec.template.spec.containers[0].image: "{{ router_image }}" + with_items: "{{ haproxy_routers }}" when: - all_routers.results.returncode == 0 - command: > - {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p - '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' - --api-version=v1 - with_items: "{{ haproxy_routers }}" - # AUDIT:changed_when_note: `false` not being set here. What we - # need to do is check the current router image version and see if - # this task needs to be ran. - name: Check for default registry oc_obj: @@ -49,15 +54,34 @@ register: _default_registry - name: Update registry image to current version + oc_edit: + kind: dc + name: docker-registry + namespace: default + content: + spec.template.spec.containers[0].image: "{{ registry_image }}" when: - _default_registry.results.results[0] != {} - command: > - {{ oc_cmd }} patch dc/docker-registry -n default -p - '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' - --api-version=v1 - # AUDIT:changed_when_note: `false` not being set here. What we - # need to do is check the current registry image version and see - # if this task needs to be ran. 
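# The oc_edit calls in this play address the container image through a dotted
# key. For reference, spec.template.spec.containers[0].image walks the
# DeploymentConfig like so (image value illustrative):
#
#   spec:
#     template:
#       spec:
#         containers:
#           - name: registry                                # containers[0]
#             image: "openshift3/ose-docker-registry:v3.6"  # the edited key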
+ + - name: Check for registry-console + oc_obj: + state: list + kind: dc + name: registry-console + register: _registry_console + when: + - openshift.common.deployment_type != 'origin' + + - name: Update registry-console image to current version + oc_edit: + kind: dc + name: registry-console + namespace: default + content: + spec.template.spec.containers[0].image: "{{ registry_console_image }}" + when: + - openshift.common.deployment_type != 'origin' + - _registry_console.results.results[0] != {} roles: - openshift_manageiq @@ -95,6 +119,12 @@ - not grep_plugin_order_override | skipped - grep_plugin_order_override.rc == 0 -- include: ../reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config tags: - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml deleted file mode 100644 index 6de1ed061..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# input variables: -# - repoquery_cmd -# - excluder -# - openshift_upgrade_target -- block: - - name: Get available excluder version - command: > - {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}" - register: excluder_version - failed_when: false - changed_when: false - - - name: Docker excluder version detected - debug: - msg: "{{ excluder }}: {{ excluder_version.stdout }}" - - - name: Printing upgrade target version - debug: - msg: "{{ openshift_upgrade_target }}" - - - name: Check the available {{ excluder }} version is at most of the upgrade target version - fail: - msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version" - when: - - "{{ excluder_version.stdout != '' }}" - - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}" - when: - - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml index 7646e0fa6..9d8b73cff 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml @@ -1,23 +1,20 @@ --- -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - # Only check if docker upgrade is required if docker_upgrade is not - # already set to False. - - include: ../docker/upgrade_check.yml - when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool +# Only check if docker upgrade is required if docker_upgrade is not +# already set to False. 
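# Note the guard on the include below groups as
#   docker_upgrade is not defined or (docker_upgrade | bool and not openshift.common.is_atomic | bool)
# because `and` binds tighter than `or` in Jinja2: the check always runs when
# docker_upgrade is unset, and is skipped when it is explicitly false or the
# host is Atomic.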
+- include: ../docker/upgrade_check.yml + when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool - # Additional checks for Atomic hosts: +# Additional checks for Atomic hosts: - - name: Determine available Docker - shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" - register: g_atomic_docker_version_result - when: openshift.common.is_atomic | bool +- name: Determine available Docker + shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" + register: g_atomic_docker_version_result + when: openshift.common.is_atomic | bool - - set_fact: - l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" - when: openshift.common.is_atomic | bool +- set_fact: + l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" + when: openshift.common.is_atomic | bool - - fail: - msg: This playbook requires access to Docker 1.12 or later - when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<') +- fail: + msg: This playbook requires access to Docker 1.12 or later + when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<') diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index c83923dae..9b4a8e413 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -1,41 +1,43 @@ --- -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - pre_tasks: - - fail: - msg: Verify OpenShift is already installed - when: openshift.common.version is not defined +- name: Fail when OpenShift is not installed + fail: + msg: Verify OpenShift is already installed + when: openshift.common.version is not defined - - fail: - msg: Verify the correct version was found - when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version +- name: Verify containers are available for upgrade + command: > + docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + when: openshift.common.is_containerized | bool - - set_fact: - g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - when: not openshift.common.is_containerized | bool +- when: not openshift.common.is_containerized | bool + block: + - name: Check latest available OpenShift RPM version + repoquery: + name: "{{ openshift.common.service_type }}" + ignore_excluders: true + register: repoquery_out - - name: Verify containers are available for upgrade - command: > - docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift.common.is_containerized | bool + - name: Fail when unable to determine available OpenShift RPM version + fail: + msg: "Unable to determine available OpenShift RPM version" + when: + - not repoquery_out.results.package_found - - name: Check latest available OpenShift RPM version - 
command: > - {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}" - failed_when: false - changed_when: false - register: avail_openshift_version - when: not openshift.common.is_containerized | bool + - name: Set fact avail_openshift_version + set_fact: + avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}" - name: Verify OpenShift RPMs are available for upgrade fail: - msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required" - when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<') + msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required" + when: + - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<') - - fail: - msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" - when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<') +- name: Fail when the OpenShift version does not meet the minimum requirement for Origin upgrade + fail: + msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" + when: + - deployment_type == 'origin' + - openshift.common.version | version_compare(openshift_upgrade_min,'<') diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index 03ac02e9f..164baca81 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -1,27 +1,39 @@ --- -# We verified latest rpm available is suitable, so just yum update. +# When we update package "a-${version}" and a requires b >= ${version}, if we +# don't specify the version of b, yum will choose the latest available version +# of b and the whole set of dependencies ends up at the latest version. +# Since the package module, unlike the yum module, doesn't flatten a list +# of packages into one transaction, we need to do that explicitly. The Ansible +# core team tells us not to rely on yum module transaction flattening anyway.
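# A standalone sketch of the flattening idiom described above, using a
# hypothetical pair of version-locked packages:
- name: Upgrade both packages in a single yum transaction
  package:
    name: "{{ demo_pkgs | join(',') }}"
    state: present
  vars:
    demo_pkgs:
      - "origin-1.5.1"
      - "origin-node-1.5.1"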
+ +# TODO: If the sdn package isn't already installed, this will install it; we +# should fix that. -# Master package upgrade ends up depending on node and sdn packages, we need to be explicit -# with all versions to avoid yum from accidentally jumping to something newer than intended: - name: Upgrade master packages - package: name={{ item }} state=present - when: component == "master" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + package: name={{ master_pkgs | join(',') }} state=present + vars: + master_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "master" + - not openshift.common.is_atomic | bool - name: Upgrade node packages - package: name={{ item }} state=present - when: component == "node" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - -- name: Ensure python-yaml present for config upgrade - package: name=PyYAML state=present - when: not openshift.common.is_atomic | bool + package: name={{ node_pkgs | join(',') }} state=present + vars: + node_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "node" + - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index c6e799261..6738ce11f 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -2,17 +2,16 @@ ############################################################################### # Upgrade Masters ############################################################################### -- name: Evaluate additional groups for upgrade - hosts: localhost - connection: local - become: no + +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade job storage + hosts: oo_first_master tasks: - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is
defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False + - name: Upgrade job storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=jobs --confirm # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. This causes problems for the etcd data dir fact detection @@ -118,8 +117,8 @@ yedit: src: "{{ openshift.common.config_base }}/master/master-config.yaml" key: 'imageConfig.format' - value: "{{ oreg_url }}" - when: oreg_url is defined + value: "{{ oreg_url | default(oreg_url_master) }}" + when: oreg_url is defined or oreg_url_master is defined # Run the upgrade hook prior to restarting services/system if defined: - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" @@ -144,6 +143,14 @@ - set_fact: master_update_complete: True +- name: Post master upgrade - Upgrade job storage + hosts: oo_first_master + tasks: + - name: Upgrade job storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=jobs --confirm + ############################################################################## # Gate on master update complete ############################################################################## @@ -258,8 +265,8 @@ hosts: oo_masters_to_config:&oo_nodes_to_upgrade # This var must be set with -e on invocation, as it is not a per-host inventory var # and is evaluated early. Values such as "20%" can also be used. - serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" - any_errors_fatal: true + serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}" + max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}" pre_tasks: - name: Load lib_openshift modules @@ -289,6 +296,7 @@ - openshift_facts - docker - openshift_node_upgrade + - openshift_node_dnsmasq post_tasks: - name: Set node schedulability diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index e9f894942..35a50cf4e 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -4,7 +4,7 @@ # This var must be set with -e on invocation, as it is not a per-host inventory var # and is evaluated early. Values such as "20%" can also be used. 
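# For example, invoking the upgrade with (values illustrative)
#   -e openshift_upgrade_control_plane_nodes_serial="20%"
#   -e openshift_upgrade_control_plane_nodes_max_fail_percentage="10"
# walks these hosts in 20% batches and aborts only when more than 10% of a
# batch fails, whereas the old any_errors_fatal setting stopped on the first
# failure. The default of 0 below preserves that strict behavior.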
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" - any_errors_fatal: true + max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}" pre_tasks: - name: Load lib_openshift modules @@ -34,6 +34,10 @@ - openshift_facts - docker - openshift_node_upgrade + - openshift_node_dnsmasq + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" post_tasks: - name: Set node schedulability @@ -46,7 +50,3 @@ register: node_schedulable until: node_schedulable|succeeded when: node_unschedulable|changed - -- include: ../reset_excluder.yml - tags: - - always diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml index 88f2ddc78..83d2cec81 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml @@ -63,12 +63,12 @@ - block: - debug: msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}" + when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] - debug: msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}" + when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates + when: openshift_master_scheduler_predicates | default(none) is not none # Handle cases where openshift_master_predicates is not defined - block: @@ -87,7 +87,7 @@ when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}" - when: "{{ openshift_master_scheduler_predicates | default(none) is none }}" + when: openshift_master_scheduler_predicates | default(none) is none # Upgrade priorities @@ -120,12 +120,12 @@ - block: - debug: msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}" + when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] - debug: msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}" + when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities + when: openshift_master_scheduler_priorities | default(none) is not none # Handle cases where openshift_master_priorities is 
not defined - block: @@ -144,7 +144,7 @@ when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}" - when: "{{ openshift_master_scheduler_priorities | default(none) is none }}" + when: openshift_master_scheduler_priorities | default(none) is none # Update scheduler diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml index 68c71a132..d69472fad 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml @@ -53,7 +53,7 @@ dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'admissionConfig.pluginConfig' yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "{{ 'admission_plugin_config' in openshift.master }}" + when: "'admission_plugin_config' in openshift.master" - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml new file mode 100644 index 000000000..f1245aa2e --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -0,0 +1,117 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
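# cleanup_unused_images.yml itself is not part of this diff; a minimal sketch
# of such a cleanup step (hypothetical task, standard docker CLI only):
#   - name: Remove dangling images
#     shell: docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi
#     failed_when: false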
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml new file mode 100644 index 000000000..b693ab55c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -0,0 +1,117 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml new file mode 100644 index 000000000..4fd029107 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -0,0 +1,112 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. 
At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml index 43c2ffcd4..ed89dbe8d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml @@ -3,7 +3,7 @@ dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'admissionConfig.pluginConfig' yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "{{ 'admission_plugin_config' in openshift.master }}" + when: "'admission_plugin_config' in openshift.master" - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml new file mode 100644 index 000000000..965e39482 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -0,0 +1,115 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml new file mode 100644 index 000000000..7830f462c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -0,0 +1,117 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml new file mode 100644 index 000000000..4364ff8e3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -0,0 +1,110 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. 
At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
- skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml new file mode 100644 index 000000000..4e7c14e94 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -0,0 +1,117 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
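Everything above this point in v3_5/upgrade.yml carries the pre_upgrade tag, so the whole validation phase can be exercised on its own with `ansible-playbook --tags pre_upgrade`, or skipped on a re-run with `--skip-tags pre_upgrade`. Any site-local hook added to this phase should follow the same tagging convention; a minimal sketch (the included file name here is hypothetical, not part of this change):

# Illustrative only: site_local_checks.yml is a hypothetical hook file.
# The tag is what matters, so it participates in --tags/--skip-tags pre_upgrade runs.
- include: ../pre/site_local_checks.yml
  tags:
  - pre_upgrade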
+ +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml new file mode 100644 index 000000000..45b664d06 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -0,0 +1,121 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_5/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml new file mode 100644 index 000000000..036d3fcf5 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -0,0 +1,110 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. 
At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
- skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml new file mode 100644 index 000000000..5b9ac9e8f --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -0,0 +1,117 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
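The '3.6' target and '3.5'/'1.5' minimum facts set at the top of this playbook feed the shared ../pre/verify_upgrade_targets.yml include used above. That file is outside this diff, but the gate it enforces amounts to roughly the following (a sketch, assuming the version_compare filter available in the Ansible 2.2-era codebase):

# Rough sketch of the upgrade-target gate; the real
# ../pre/verify_upgrade_targets.yml is not part of this diff.
- name: Fail when the installed version is below the upgrade minimum
  fail:
    msg: "{{ inventory_hostname }} is at {{ openshift.common.version }}; version {{ openshift_upgrade_min }} or newer is required to upgrade to {{ openshift_upgrade_target }}"
  when: openshift.common.version | version_compare(openshift_upgrade_min, '<')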
+ +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml new file mode 100644 index 000000000..a470c7595 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -0,0 +1,121 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_6/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml new file mode 100644 index 000000000..25eceaf90 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -0,0 +1,110 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. 
At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml index ac5704f69..78c1767b8 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml @@ -7,4 +7,6 @@ hosts: oo_first_master roles: - { role: lib_openshift } - tasks: [] + tasks: + - name: Check for invalid namespaces and SDN errors + oc_objectvalidator: diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml index 48cc03b19..33fc5630f 100644 --- a/playbooks/common/openshift-cluster/validate_hostnames.yml +++ b/playbooks/common/openshift-cluster/validate_hostnames.yml @@ -13,4 +13,6 @@ pause: prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort." 
seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}" - when: lookupip.stdout not in ansible_all_ipv4_addresses + when: + - lookupip.stdout != '127.0.0.1' + - lookupip.stdout not in ansible_all_ipv4_addresses diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 1b8106e0e..2cb6197d1 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -7,4 +7,5 @@ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml new file mode 100644 index 000000000..c655449fa --- /dev/null +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -0,0 +1,120 @@ +--- +- include: ../openshift-cluster/evaluate_groups.yml + tags: + - always + +- name: Run pre-checks + hosts: oo_etcd_to_migrate + tags: + - always + roles: + - role: etcd_migrate + r_etcd_migrate_action: check + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ ansible_default_ipv4.address }}" + +- include: ../openshift-cluster/initialize_facts.yml + tags: + - always + +- name: Backup v2 data + hosts: oo_etcd_to_migrate + gather_facts: no + tags: + - always + roles: + - role: openshift_facts + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migration + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_migrate) + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}" + - fail: + msg: "Migration cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+    when:
+    - etcd_backup_failed | length > 0
+
+- name: Prepare masters for etcd data migration
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      master_services:
+      - "{{ openshift.common.service_type + '-master' }}"
+  - set_fact:
+      master_services:
+      - "{{ openshift.common.service_type + '-master-controllers' }}"
+      - "{{ openshift.common.service_type + '-master-api' }}"
+    when:
+    - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
+  - debug:
+      msg: "master service name: {{ master_services }}"
+  - name: Stop masters
+    service:
+      name: "{{ item }}"
+      state: stopped
+    with_items: "{{ master_services }}"
+
+- name: Migrate etcd data from v2 to v3
+  hosts: oo_etcd_to_migrate
+  gather_facts: no
+  tags:
+  - always
+  roles:
+  - role: etcd_migrate
+    r_etcd_migrate_action: migrate
+    r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+    etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- name: Gate on etcd migration
+  hosts: oo_masters_to_config
+  gather_facts: no
+  tasks:
+  - set_fact:
+      etcd_migration_completed: "{{ hostvars
+                                  | oo_select_keys(groups.oo_etcd_to_migrate)
+                                  | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+  - set_fact:
+      etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
+
+- name: Configure masters if etcd data migration is successful
+  hosts: oo_masters_to_config
+  roles:
+  - role: etcd_migrate
+    r_etcd_migrate_action: configure
+    when: etcd_migration_failed | length == 0
+  tasks:
+  - debug:
+      msg: "Skipping master re-configuration since migration failed."
+    when:
+    - etcd_migration_failed | length > 0
+
+- name: Start masters after etcd data migration
+  hosts: oo_masters_to_config
+  tasks:
+  - name: Start master services
+    service:
+      name: "{{ item }}"
+      state: started
+    register: service_status
+    # Sometimes the master-api or the master-controllers service fails to start on the first attempt
+    until: service_status.state is defined and service_status.state == "started"
+    retries: 5
+    delay: 10
+    with_items: "{{ master_services[::-1] }}"
+  - fail:
+      msg: "Migration failed. 
The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}" + when: + - etcd_migration_failed | length > 0 diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml index 196c86f28..af1ef245a 100644 --- a/playbooks/common/openshift-etcd/restart.yml +++ b/playbooks/common/openshift-etcd/restart.yml @@ -5,5 +5,5 @@ tasks: - name: restart etcd service: - name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}" + name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" state: restarted diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml new file mode 100644 index 000000000..1efdfb336 --- /dev/null +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -0,0 +1,23 @@ +--- +- name: Open firewall ports for GlusterFS + hosts: oo_glusterfs_to_config + vars: + os_firewall_allow: + - service: glusterfs_sshd + port: "2222/tcp" + - service: glusterfs_daemon + port: "24007/tcp" + - service: glusterfs_management + port: "24008/tcp" + - service: glusterfs_bricks + port: "49152-49251/tcp" + roles: + - role: os_firewall + when: + - openshift_storage_glusterfs_is_native | default(True) + +- name: Configure GlusterFS + hosts: oo_first_master + roles: + - role: openshift_storage_glusterfs + when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-glusterfs/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-glusterfs/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..80cf7529e --- /dev/null +++ b/playbooks/common/openshift-glusterfs/registry.yml @@ -0,0 +1,49 @@ +--- +- include: config.yml + +- name: Initialize GlusterFS registry PV and PVC vars + hosts: oo_first_master + tags: hosted + tasks: + - set_fact: + glusterfs_pv: [] + glusterfs_pvc: [] + + - set_fact: + glusterfs_pv: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + storage: + glusterfs: + endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" + path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" + readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" + glusterfs_pvc: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + when: openshift.hosted.registry.storage.glusterfs.swap + +- name: Create persistent volumes + hosts: oo_first_master + tags: + - hosted + vars: + persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" + persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" + roles: + - role: openshift_persistent_volumes + when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + +- name: Create Hosted Resources + hosts: oo_first_master + tags: + - hosted + pre_tasks: + - set_fact: + openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" + openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" + when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" + roles: + - role: openshift_hosted diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-glusterfs/roles @@ -0,0 +1 @@ +../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 81c922043..7d3a371e3 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -48,12 +48,6 @@ - set_fact: openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}" when: openshift_hosted_metrics_resolution is not defined - - set_fact: - openshift_hosted_metrics_deployer_prefix: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_prefix') | default('openshift') }}" - when: openshift_hosted_metrics_deployer_prefix is not defined - - set_fact: - openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}" - when: openshift_hosted_metrics_deployer_version is not defined roles: - openshift_facts post_tasks: @@ -75,7 +69,7 @@ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" -- name: Determine if session secrets must be generated +- name: Inspect state of first master session secrets and config hosts: oo_first_master roles: - role: openshift_facts @@ -85,6 +79,24 @@ local_facts: session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" + - name: Check for existing configuration + stat: + path: /etc/origin/master/master-config.yaml + register: master_config_stat + + - name: Set clean install fact + set_fact: + l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" + + - name: Determine if etcd3 storage is in use + command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q + register: etcd3_grep + failed_when: false + changed_when: false + + - name: Set etcd3 fact + set_fact: + l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" - name: Generate master session secrets hosts: oo_first_master @@ -123,10 +135,13 @@ | oo_collect('openshift.common.hostname') | default(none, true) }}" openshift_master_hosts: "{{ groups.oo_masters_to_config }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" + r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" + r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" - role: nuage_master when: openshift.common.use_nuage | bool - role: calico_master diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index b35368bf1..6fec346c3 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -1,5 +1,5 @@ --- -- include: ../../common/openshift-master/validate_restart.yml +- include: validate_restart.yml - name: Restart masters hosts: oo_masters_to_config @@ -12,8 +12,8 @@ roles: - openshift_facts post_tasks: - - include: ../../common/openshift-master/restart_hosts.yml + - include: restart_hosts.yml when: openshift_rolling_restart_mode | default('services') 
== 'system' - - include: ../../common/openshift-master/restart_services.yml + - include: restart_services.yml when: openshift_rolling_restart_mode | default('services') == 'services' diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 92f16dc47..bc61ee9bb 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -51,7 +51,7 @@ changed_when: false - name: Configure docker hosts - hosts: oo_masters_to-config:oo_nodes_to_config + hosts: oo_masters_to_config:oo_nodes_to_config vars: docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" @@ -60,9 +60,15 @@ - openshift_facts - openshift_docker -- include: ../openshift-cluster/disable_excluder.yml +- name: Disable excluders + hosts: oo_masters_to_config tags: - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - include: ../openshift-master/config.yml @@ -70,6 +76,12 @@ - include: ../openshift-node/config.yml -- include: ../openshift-cluster/reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config tags: - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 792ffb4e2..acebabc91 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -32,7 +32,7 @@ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ groups.oo_nodes_to_config | default([]) }}" - when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + when: hostvars[item].openshift is defined and hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) changed_when: False - name: Configure containerized nodes diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml new file mode 100644 index 000000000..0014a5dbd --- /dev/null +++ b/playbooks/common/openshift-node/network_manager.yml @@ -0,0 +1,26 @@ +--- +- name: Install and configure NetworkManager + hosts: oo_all_hosts + become: yes + tasks: + - name: install NetworkManager + package: + name: 'NetworkManager' + state: present + + - name: configure NetworkManager + lineinfile: + dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" + regexp: '^{{ item }}=' + line: '{{ item }}=yes' + state: present + create: yes + with_items: + - 'USE_PEERDNS' + - 'NM_CONTROLLED' + + - name: enable and start NetworkManager + service: + name: 'NetworkManager' + state: started + enabled: yes diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml index 441b100e9..01cf948e0 100644 --- a/playbooks/common/openshift-node/restart.yml +++ b/playbooks/common/openshift-node/restart.yml @@ -51,7 +51,7 @@ register: 
node_output delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_config - until: node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True + until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True # Give the node two minutes to come back online. retries: 24 delay: 5 diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml index c31aca62b..40da8990d 100644 --- a/playbooks/common/openshift-node/scaleup.yml +++ b/playbooks/common/openshift-node/scaleup.yml @@ -27,12 +27,24 @@ - openshift_facts - openshift_docker -- include: ../openshift-cluster/disable_excluder.yml +- name: Disable excluders + hosts: oo_nodes_to_config tags: - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - include: ../openshift-node/config.yml -- include: ../openshift-cluster/reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_nodes_to_config tags: - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" |
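The scaleup changes above replace the shared disable_excluder.yml / reset_excluder.yml includes with inline plays against the openshift_excluder role, bracketing the config includes on both the master and node paths. The pattern generalizes to any host group; a sketch of the bracket (the oo_new_hosts group name is illustrative only):

# Generalized excluder bracket, as used by the master and node scaleup
# playbooks in this change; oo_new_hosts is an example group name.
- name: Disable excluders before configuration
  hosts: oo_new_hosts
  gather_facts: no
  roles:
  - role: openshift_excluder
    r_openshift_excluder_action: disable
    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"

# ... configuration plays run here ...

- name: Re-enable excluders once configuration is complete
  hosts: oo_new_hosts
  gather_facts: no
  roles:
  - role: openshift_excluder
    r_openshift_excluder_action: enable
    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"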