Diffstat (limited to 'playbooks')
102 files changed, 666 insertions, 1540 deletions
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 07f10d48c..5ed55a817 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -151,6 +151,14 @@ - lbr0 - vlinuxbr - vovsbr + + - name: Remove virtual devices + command: nmcli device delete "{{ item }}" + failed_when: False + with_items: + - tun0 + - docker0 + when: openshift_remove_all | default(true) | bool - shell: atomic uninstall "{{ item }}"-master-api @@ -264,12 +272,30 @@ - "{{ directories.results | default([]) }}" - files + - shell: systemctl daemon-reload + changed_when: False + + - name: stop container-engine + service: name=container-engine state=stopped enabled=no + failed_when: false + register: container_engine + + - name: stop docker + service: name=docker state=stopped enabled=no + failed_when: false + when: not (container_engine | changed) + register: l_docker_restart_docker_in_pb_result + until: not l_docker_restart_docker_in_pb_result | failed + retries: 3 + delay: 30 + - name: Remove remaining files file: path={{ item }} state=absent with_items: - /etc/ansible/facts.d/openshift.fact - /etc/openshift - /etc/openshift-sdn + - /etc/pki/ca-trust/source/anchors/openshift-ca.crt - /etc/sysconfig/atomic-openshift-node - /etc/sysconfig/atomic-openshift-node-dep - /etc/sysconfig/openshift-node-dep @@ -284,23 +310,38 @@ - /etc/systemd/system/origin-node-dep.service - /etc/systemd/system/origin-node.service - /etc/systemd/system/origin-node.service.wants + - /var/lib/docker + + - name: Rebuild ca-trust + command: update-ca-trust + + - name: Reset Docker proxy configuration + lineinfile: + state=absent + dest=/etc/sysconfig/docker + regexp='(NO_PROXY|HTTP_PROXY|HTTPS_PROXY)=.*' + + - name: Reset Docker registry configuration + lineinfile: + state=absent + dest=/etc/sysconfig/docker + regexp='(ADD_REGISTRY|BLOCK_REGISTRY|INSECURE_REGISTRY)=.*' + + - name: Detect Docker storage configuration + shell: vgs -o name | grep docker + register: docker_vg_name + failed_when: false + changed_when: false - - shell: systemctl daemon-reload - changed_when: False + - name: Wipe out Docker storage contents + command: vgremove -f {{ item }} + with_items: "{{ docker_vg_name.stdout_lines }}" + when: docker_vg_name.rc == 0 - - name: restart container-engine - service: name=container-engine state=restarted - failed_when: false - register: container_engine + - name: Wipe out Docker storage configuration + file: path=/etc/sysconfig/docker-storage state=absent + when: docker_vg_name.rc == 0 - - name: restart docker - service: name=docker state=restarted - failed_when: false - when: not (container_engine | changed) - register: l_docker_restart_docker_in_pb_result - until: not l_docker_restart_docker_in_pb_result | failed - retries: 3 - delay: 30 - hosts: masters become: yes @@ -525,3 +566,7 @@ with_items: - /etc/ansible/facts.d/openshift.fact - /var/lib/haproxy/stats + # Here we remove only limits.conf rather than the whole directory, as users may have put their own files there.
+ # - /etc/systemd/system/haproxy.service.d + - /etc/systemd/system/haproxy.service.d/limits.conf + - /etc/systemd/system/haproxy.service diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index fbab61189..417fb539a 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -65,8 +65,9 @@ openshift_release: # example: v3.7 openshift_pkg_version: # example: -3.7.0 openshift_aws_ssh_key_name: # example: myuser_key openshift_aws_base_ami: # example: ami-12345678 +# These are required when doing SSL on the ELBs openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt' -openshift_aws_iam_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key' +openshift_aws_iam_cert_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key' ``` If customization is required for the instances, scale groups, or any other configurable option, please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`. diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml index 5b4a6a1e8..fae30eb0a 100644 --- a/playbooks/aws/openshift-cluster/build_ami.yml +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -28,6 +28,8 @@ set_fact: ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}" openshift_node_bootstrap: True + openshift_node_image_prep_packages: + - cloud-utils-growpart # This is the part that installs all of the software and configs for the instance # to become a node. diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml new file mode 100644 index 000000000..db6e3b8e1 --- /dev/null +++ b/playbooks/aws/openshift-cluster/hosted.yml @@ -0,0 +1,22 @@ +--- +- include: ../../common/openshift-cluster/openshift_hosted.yml + +- include: ../../common/openshift-cluster/openshift_metrics.yml + when: openshift_metrics_install_metrics | default(false) | bool + +- include: ../../common/openshift-cluster/openshift_logging.yml + when: openshift_logging_install_logging | default(false) | bool + +- include: ../../common/openshift-cluster/service_catalog.yml + when: openshift_enable_service_catalog | default(false) | bool + +- include: ../../common/openshift-management/config.yml + when: openshift_management_install_management | default(false) | bool + +- name: Print deprecated variable warning message if necessary + hosts: oo_first_master + gather_facts: no + tasks: + - debug: msg="{{__deprecation_message}}" + when: + - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml index 4d0bf9531..1e8118490 100644 --- a/playbooks/aws/openshift-cluster/install.yml +++ b/playbooks/aws/openshift-cluster/install.yml @@ -21,5 +21,29 @@ - name: run the std_include include: ../../common/openshift-cluster/std_include.yml -- name: run the config - include: ../../common/openshift-cluster/config.yml +- name: perform the installer openshift-checks + include: ../../common/openshift-checks/install.yml + +- name: etcd install + include: ../../common/openshift-etcd/config.yml + +- name: include nfs + include: ../../common/openshift-nfs/config.yml + when: groups.oo_nfs_to_config | default([]) | count > 0 + +- name: include loadbalancer + include: ../../common/openshift-loadbalancer/config.yml + when:
groups.oo_lb_to_config | default([]) | count > 0 + +- name: include openshift-master config + include: ../../common/openshift-master/config.yml + +- name: include master additional config + include: ../../common/openshift-master/additional_config.yml + +- name: include openshift-node config + include: ../../common/openshift-node/config.yml + +- name: include openshift-glusterfs + include: ../../common/openshift-glusterfs/config.yml + when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml index df77fe3bc..f5eb01b14 100644 --- a/playbooks/aws/openshift-cluster/prerequisites.yml +++ b/playbooks/aws/openshift-cluster/prerequisites.yml @@ -4,5 +4,3 @@ - include: provision_ssh_keypair.yml - include: provision_sec_group.yml - vars: - openshift_aws_node_group_type: compute diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml index e787deced..78dd6a49b 100644 --- a/playbooks/aws/openshift-cluster/provision_install.yml +++ b/playbooks/aws/openshift-cluster/provision_install.yml @@ -6,11 +6,14 @@ - name: Include the provision.yml playbook to create cluster include: provision.yml -- name: Include the install.yml playbook to install cluster +- name: Include the install.yml playbook to install cluster on masters include: install.yml -- name: Include the install.yml playbook to install cluster +- name: Include the provision_nodes.yml playbook to provision infra/compute node resources include: provision_nodes.yml - name: Include the accept.yml playbook to accept nodes into the cluster include: accept.yml + +- name: Include the hosted.yml playbook to finish the hosted configuration + include: hosted.yml diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml index 039357adb..7d74a691a 100644 --- a/playbooks/aws/openshift-cluster/provision_sec_group.yml +++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml @@ -6,7 +6,7 @@ connection: local gather_facts: no tasks: - - name: create an instance and prepare for ami + - name: create security groups include_role: name: openshift_aws tasks_from: security_group.yml diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example index aa91363ae..1491fb868 100644 --- a/playbooks/aws/provisioning_vars.yml.example +++ b/playbooks/aws/provisioning_vars.yml.example @@ -116,5 +116,5 @@ openshift_aws_base_ami: # ami-12345678 # custom certificates are required for the ELB openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt' -openshift_aws_iam_key_path: # '/path/to/wildcard.<clusterid>.example.com.key' -#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt' +openshift_aws_iam_cert_key_path: # '/path/to/wildcard.<clusterid>.example.com.key' +openshift_aws_iam_cert_chain_path: # '/path/to/cert.ca.crt' diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 60fa44c5b..f2e52782b 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -8,5 +8,3 @@ - always - include: ../../common/openshift-cluster/config.yml - vars: - openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index 255b0dbf7..f53d34145 100644 ---
a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -42,3 +42,7 @@ - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml when: openshift_hosted_manage_registry | default(true) | bool + +- include: ../../common/openshift-master/revert-client-ca.yml + +- include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md index 0f64f40f3..d9b1fc2ca 100644 --- a/playbooks/byo/openshift-cluster/upgrades/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/README.md @@ -4,6 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are provided in their respective directories. # Upgrades available +- [OpenShift Container Platform 3.6 to 3.7](v3_7/README.md) (works also to upgrade OpenShift Origin from 3.6.x to 3.7.x) - [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x) -- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x) -- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x) diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml deleted file mode 100644 index 697a18c4d..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml deleted file mode 100644 index 4d284c279..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml deleted file mode 100644 index 180a2821f..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md deleted file mode 100644 index 85b807dc6..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# v3.4 Major and Minor Upgrade Playbook - -## Overview -This playbook currently performs the -following steps. - - * Upgrade and restart master services - * Unschedule node. 
- * Upgrade and restart docker - * Upgrade and restart node services - * Modifies the subset of the configuration necessary - * Applies the latest cluster policies - * Updates the default router if one exists - * Updates the default registry if one exists - * Updates image streams and quickstarts - -## Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml deleted file mode 100644 index d5329b858..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md deleted file mode 100644 index 53eebe65e..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# v3.5 Major and Minor Upgrade Playbook - -## Overview -This playbook currently performs the -following steps. - - * Upgrade and restart master services - * Unschedule node. - * Upgrade and restart docker - * Upgrade and restart node services - * Modifies the subset of the configuration necessary - * Applies the latest cluster policies - * Updates the default router if one exists - * Updates the default registry if one exists - * Updates image streams and quickstarts - -## Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml deleted file mode 100644 index f44d55ad2..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml deleted file mode 100644 index 2377713fa..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md index 4bf53be81..914e0f5b2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md @@ -1,4 +1,4 @@ -# v3.6 Major and Minor Upgrade Playbook +# v3.7 Major and Minor Upgrade Playbook ## Overview This playbook currently performs the following steps. 
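For reference, the detect-then-wipe sequence that the uninstall.yml hunk near the top of this diff adds for LVM-backed Docker storage can be read as a self-contained playbook. This is a minimal sketch of the pattern, not part of the patch; the `nodes` host group and the play wrapper are assumptions:

```
---
# Sketch of the detect-then-wipe pattern from the uninstall.yml hunk above.
# The "nodes" inventory group is an assumed name.
- hosts: nodes
  become: yes
  tasks:
    - name: Detect Docker storage configuration
      shell: vgs -o name | grep docker
      register: docker_vg_name
      failed_when: false    # no docker VG (grep rc=1) must not fail the play
      changed_when: false   # detection only, never reports a change

    - name: Wipe out Docker storage contents
      command: vgremove -f {{ item }}
      with_items: "{{ docker_vg_name.stdout_lines }}"
      when: docker_vg_name.rc == 0   # only when a docker VG was found

    - name: Wipe out Docker storage configuration
      file: path=/etc/sysconfig/docker-storage state=absent
      when: docker_vg_name.rc == 0
```

Registering the grep result and gating both destructive tasks on `rc == 0` keeps the uninstall safe on hosts that never had LVM-backed Docker storage.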
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md index 6892f6324..d9be6ae3b 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md @@ -1,11 +1,10 @@ -# v3.3 Major and Minor Upgrade Playbook +# v3.8 Major and Minor Upgrade Playbook ## Overview -This playbook currently performs the -following steps. +This playbook currently performs the following steps. * Upgrade and restart master services - * Unschedule node. + * Unschedule node * Upgrade and restart docker * Upgrade and restart node services * Modifies the subset of the configuration necessary @@ -15,4 +14,7 @@ following steps. * Updates image streams and quickstarts ## Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml index 8cce91b3f..3d4e6a790 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -4,4 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index 8e5d0f5f9..d83305119 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -13,4 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index 5b3f6ab06..a972bb7a6 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -6,4 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-management/add_container_provider.yml b/playbooks/byo/openshift-management/add_container_provider.yml new file mode 100644 index 000000000..3378b5abd --- /dev/null +++ b/playbooks/byo/openshift-management/add_container_provider.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/evaluate_groups.yml + +- include: ../../common/openshift-management/add_container_provider.yml diff --git a/playbooks/byo/openshift-management/add_many_container_providers.yml b/playbooks/byo/openshift-management/add_many_container_providers.yml new file mode 100644 index 000000000..62fdb11c5 --- /dev/null +++ b/playbooks/byo/openshift-management/add_many_container_providers.yml @@ -0,0 +1,36 @@ +--- +- hosts: localhost + tasks: + - name: Ensure the container provider configuration is defined + assert:
+ that: container_providers_config is defined + msg: | + Error: Must provide providers config path. Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command + + - name: Include providers/management configuration + include_vars: + file: "{{ container_providers_config }}" + + - name: Ensure this cluster is a container provider + uri: + url: "https://{{ management_server['hostname'] }}/api/providers" + body_format: json + method: POST + user: "{{ management_server['user'] }}" + password: "{{ management_server['password'] }}" + validate_certs: no + # Docs on formatting the BODY of the POST request: + # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations + body: "{{ item }}" + failed_when: false + with_items: "{{ container_providers }}" + register: results + + # Include openshift_management for access to filter_plugins. + - include_role: + name: openshift_management + tasks_from: noop + + - name: print each result + debug: + msg: "{{ results.results | oo_filter_container_providers }}" diff --git a/playbooks/byo/openshift-management/config.yml b/playbooks/byo/openshift-management/config.yml index e8795ef85..209c66502 100644 --- a/playbooks/byo/openshift-management/config.yml +++ b/playbooks/byo/openshift-management/config.yml @@ -1,6 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml -- include: ../../common/openshift-cluster/evaluate_groups.yml +- include: ../../common/openshift-cluster/std_include.yml - include: ../../common/openshift-management/config.yml diff --git a/playbooks/byo/openshift-management/roles b/playbooks/byo/openshift-management/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-management/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml index a1fb1cdc4..e95c1c88a 100644 --- a/playbooks/byo/openshift-management/uninstall.yml +++ b/playbooks/byo/openshift-management/uninstall.yml @@ -1,4 +1,2 @@ --- -# - include: ../openshift-cluster/initialize_groups.yml - - include: ../../common/openshift-management/uninstall.yml diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml index dfcef8435..d0deaeb65 100644 --- a/playbooks/common/openshift-checks/adhoc.yml +++ b/playbooks/common/openshift-checks/adhoc.yml @@ -1,12 +1,13 @@ --- -- name: OpenShift health checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: adhoc post_tasks: - - name: Run health checks + - name: Run health checks (adhoc) action: openshift_health_check args: checks: '{{ openshift_checks | default([]) }}' diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml index 21ea785ef..d0921b9d3 100644 --- a/playbooks/common/openshift-checks/health.yml +++ b/playbooks/common/openshift-checks/health.yml @@ -1,11 +1,13 @@ --- -- name: Run OpenShift health checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: health post_tasks: - - action: openshift_health_check + - name: Run health checks (@health) + action: openshift_health_check args: checks: ['@health'] diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml new file mode 100644 index 000000000..93cf6c359 --- /dev/null +++ b/playbooks/common/openshift-checks/install.yml @@ -0,0 +1,51 @@ +--- +- name: Health Check Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Health Check 'In Progress' + run_once: true + set_stats: + data: + installer_phase_health: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: OpenShift Health Checks + hosts: oo_all_hosts + any_errors_fatal: true + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: install + post_tasks: + - name: Run health checks (install) - EL + when: ansible_distribution != "Fedora" + action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - package_availability + - package_version + - docker_image_availability + - docker_storage + + - name: Run health checks (install) - Fedora + when: ansible_distribution == "Fedora" + action: openshift_health_check + args: + checks: + - docker_image_availability + +- name: Health Check Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Health Check 'Complete' + run_once: true + set_stats: + data: + installer_phase_health: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml index 88e6f9120..32449d4e4 100644 --- a/playbooks/common/openshift-checks/pre-install.yml +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -1,11 +1,13 @@ --- -- name: run OpenShift pre-install checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: pre-install post_tasks: - - action: 
openshift_health_check + - name: Run health checks (@preflight) + action: openshift_health_check args: checks: ['@preflight'] diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml index 5ddafdb07..359132dd0 100644 --- a/playbooks/common/openshift-cluster/cockpit-ui.yml +++ b/playbooks/common/openshift-cluster/cockpit-ui.yml @@ -3,4 +3,6 @@ hosts: oo_first_master roles: - role: cockpit-ui - when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) + when: + - openshift_hosted_manage_registry | default(true) | bool + - not openshift.docker.hosted_registry_insecure | default(false) | bool diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 395eb51f1..3b4d6f9a6 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,31 +1,5 @@ --- -# TODO: refactor this into its own include -# and pass a variable for ctx -- name: Verify Requirements - hosts: oo_all_hosts - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: install - post_tasks: - - - name: Verify Requirements - EL - when: ansible_distribution != "Fedora" - action: openshift_health_check - args: - checks: - - disk_availability - - memory_availability - - package_availability - - package_version - - docker_image_availability - - docker_storage - - name: Verify Requirements - Fedora - when: ansible_distribution == "Fedora" - action: openshift_health_check - args: - checks: - - docker_image_availability +- include: ../openshift-checks/install.yml - include: ../openshift-etcd/config.yml @@ -53,7 +27,7 @@ when: openshift_logging_install_logging | default(false) | bool - include: service_catalog.yml - when: openshift_enable_service_catalog | default(false) | bool + when: openshift_enable_service_catalog | default(true) | bool - include: ../openshift-management/config.yml when: openshift_management_install_management | default(false) | bool diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml index ec6f2c52c..8a60a30b8 100644 --- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml +++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml @@ -1,13 +1,4 @@ --- -- name: Create persistent volumes - hosts: oo_first_master - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" - tasks: - - debug: var=persistent_volumes - - debug: var=persistent_volume_claims - - name: Create Hosted Resources - persistent volumes hosts: oo_first_master vars: diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml index be14b06f0..fe765aa5d 100644 --- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -5,9 +5,6 @@ hosts: oo_masters_to_config:oo_nodes_to_config roles: - openshift_facts - post_tasks: - - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1" - when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool - name: Reconfigure masters to listen on our new dns_port hosts: 
oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index be2f8b5f4..91223d368 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -10,6 +10,7 @@ - name: load openshift_facts module include_role: name: openshift_facts + static: yes # TODO: Should this role be refactored into health_checks?? - name: Run openshift_sanitize_inventory to set variables @@ -145,7 +146,19 @@ https_proxy: "{{ openshift_https_proxy | default(None) }}" no_proxy: "{{ openshift_no_proxy | default(None) }}" generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" - no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}" + + - name: Set fact of no_proxy_internal_hostnames + openshift_facts: + role: common + local_facts: + no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - name: initialize_facts set_fact repoquery command set_fact: diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index e6400ea61..37a5284d5 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,15 +1,4 @@ --- -# openshift_install_base_package_group may be set in a play variable to limit -# the host groups the base package is installed on. This is currently used -# for master/control-plane upgrades. 
-- name: Set version_install_base_package true on masters and nodes - hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}" - tasks: - - name: Set version_install_base_package true - set_fact: - version_install_base_package: True - when: version_install_base_package is not defined - # NOTE: requires openshift_facts be run - name: Determine openshift_version to configure on first master hosts: oo_first_master diff --git a/playbooks/common/openshift-cluster/install_docker_gc.yml b/playbooks/common/openshift-cluster/install_docker_gc.yml new file mode 100644 index 000000000..1e3dfee07 --- /dev/null +++ b/playbooks/common/openshift-cluster/install_docker_gc.yml @@ -0,0 +1,7 @@ +--- +- name: Install docker gc + hosts: oo_first_master + gather_facts: false + tasks: + - include_role: + name: openshift_docker_gc diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml index 4b4f19690..62fe0dd60 100644 --- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml +++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml @@ -3,4 +3,4 @@ hosts: oo_first_master roles: - role: openshift_default_storage_class - when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') + when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack') diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index c1536eb36..15ee60dc0 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -1,13 +1,15 @@ --- - name: Hosted Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Hosted install 'In Progress' + run_once: true set_stats: data: - installer_phase_hosted: "In Progress" - aggregate: false + installer_phase_hosted: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - include: create_persistent_volumes.yml @@ -24,12 +26,19 @@ - include: openshift_prometheus.yml when: openshift_hosted_prometheus_deploy | default(False) | bool +- include: install_docker_gc.yml + when: + - openshift_use_crio | default(False) | bool + - openshift_crio_enable_docker_gc | default(False) | bool + - name: Hosted Install Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Hosted install 'Complete' + run_once: true set_stats: data: - installer_phase_hosted: "Complete" - aggregate: false + installer_phase_hosted: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml index 529a4c939..bc59bd95a 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/common/openshift-cluster/openshift_logging.yml @@ -1,13 +1,15 @@ --- - name: Logging Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Logging install 'In Progress' + run_once: true set_stats: data: - installer_phase_logging: "In Progress" - aggregate: false + installer_phase_logging: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') 
}}" - name: OpenShift Aggregated Logging hosts: oo_first_master @@ -23,11 +25,13 @@ tasks_from: update_master_config - name: Logging Install Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Logging install 'Complete' + run_once: true set_stats: data: - installer_phase_logging: "Complete" - aggregate: false + installer_phase_logging: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml index 9c0bd489b..80cd93e5f 100644 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -1,13 +1,15 @@ --- - name: Metrics Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Metrics install 'In Progress' + run_once: true set_stats: data: - installer_phase_metrics: "In Progress" - aggregate: false + installer_phase_metrics: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: OpenShift Metrics hosts: oo_first_master @@ -24,11 +26,13 @@ tasks_from: update_master_config.yaml - name: Metrics Install Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Metrics install 'Complete' + run_once: true set_stats: data: - installer_phase_metrics: "Complete" - aggregate: false + installer_phase_metrics: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml index ac2d250a3..7aa9a16e6 100644 --- a/playbooks/common/openshift-cluster/openshift_prometheus.yml +++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml @@ -1,5 +1,29 @@ --- +- name: Prometheus Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Prometheus install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_prometheus: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + - name: Create Hosted Resources - openshift_prometheus hosts: oo_first_master roles: - role: openshift_prometheus + +- name: Prometheus Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Prometheus install 'Complete' + run_once: true + set_stats: + data: + installer_phase_prometheus: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 2068ed199..eb225dfb5 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -1,11 +1,4 @@ --- -- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2 - hosts: oo_first_master - tasks: - - fail: - msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles." 
- when: not openshift.common.version_gte_3_2_or_1_2 | bool - - name: Check cert expirys hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config vars: @@ -43,11 +36,6 @@ when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt' - modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: servingInfo.clientCA - yaml_value: ca.crt - when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: etcdClientInfo.ca yaml_value: ca-bundle.crt when: @@ -67,6 +55,13 @@ when: - groups.oo_etcd_to_config | default([]) | length == 0 - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt' + # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate. + # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: client-ca-bundle.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt' - name: Copy current OpenShift CA to legacy directory hosts: oo_masters_to_config @@ -155,6 +150,7 @@ - ca.key - ca-bundle.crt - ca.serial.txt + - client-ca-bundle.crt delegate_to: "{{ openshift_ca_host }}" run_once: true changed_when: false @@ -173,6 +169,7 @@ - ca.key - ca-bundle.crt - ca.serial.txt + - client-ca-bundle.crt - name: Update master client kubeconfig CA data kubeclient_ca: client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml index afd5463b2..7e9363c5f 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml @@ -70,9 +70,7 @@ --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" --cert={{ openshift.common.config_base }}/master/registry.crt --key={{ openshift.common.config_base }}/master/registry.key - {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %} --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }} - {% endif %} - name: Update registry certificates secret oc_secret: diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml index bd964b2ce..7bb8511f6 100644 --- a/playbooks/common/openshift-cluster/service_catalog.yml +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -1,13 +1,15 @@ --- - name: Service Catalog Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Service Catalog install 'In Progress' + run_once: true set_stats: data: - installer_phase_servicecatalog: "In Progress" - aggregate: false + installer_phase_servicecatalog: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Service Catalog hosts: oo_first_master @@ -19,11 +21,13 @@ first_master: "{{ groups.oo_first_master[0] }}" - name: Service Catalog Install Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: 
false tasks: - name: Set Service Catalog install 'Complete' + run_once: true set_stats: data: - installer_phase_servicecatalog: "Complete" - aggregate: false + installer_phase_servicecatalog: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 45b34c8bd..fe376fe31 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -1,15 +1,17 @@ --- - name: Initialization Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false roles: - installer_checkpoint tasks: - name: Set install initialization 'In Progress' + run_once: true set_stats: data: - installer_phase_initialize: "In Progress" - aggregate: false + installer_phase_initialize: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - include: evaluate_groups.yml tags: @@ -36,11 +38,13 @@ - always - name: Initialization Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set install initialization 'Complete' + run_once: true set_stats: data: - installer_phase_initialize: "Complete" - aggregate: false + installer_phase_initialize: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 2826951e6..6ad0b6b86 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -9,7 +9,12 @@ - name: Ensure firewall is not switched during upgrade hosts: oo_all_hosts + vars: + openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}" tasks: + - name: set currently installed version + set_fact: + openshift_currently_installed_version: "{{ openshift_master_installed_version }}" - name: Check if iptables is running command: systemctl status iptables changed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index e2d74065a..c634e0ab8 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -119,7 +119,6 @@ - name: grep pluginOrderOverride command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml register: grep_plugin_order_override - when: openshift.common.version_gte_3_3_or_1_3 | bool changed_when: false failed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml index 45022cd61..6a5bc24f7 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml @@ -9,16 +9,29 @@ local_facts: ha: "{{ groups.oo_masters_to_config | length > 1 }}" - - name: Ensure HA Master is running - service: - name: "{{ openshift.common.service_type }}-master-api" - state: started - enabled: yes - when: openshift.common.is_containerized | bool + - when: openshift.common.is_containerized | bool + block: + - set_fact: + master_services: + - "{{ openshift.common.service_type }}-master" - - name: Ensure HA Master is running - service: - name: "{{ 
openshift.common.service_type }}-master-controllers" - state: started - enabled: yes - when: openshift.common.is_containerized | bool + # Handle the case of a non-HA to HA upgrade. + - name: Check if the {{ openshift.common.service_type }}-master-api.service exists + command: > + systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend + register: master_api_service_status + + - set_fact: + master_services: + - "{{ openshift.common.service_type }}-master-api" + - "{{ openshift.common.service_type }}-master-controllers" + when: + - master_api_service_status.stdout_lines | length > 0 + - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0] + + - name: Ensure Master is running + service: + name: "{{ item }}" + state: started + enabled: yes + with_items: "{{ master_services }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 142ce5f3d..13fa37b09 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -4,6 +4,12 @@ msg: Verify OpenShift is already installed when: openshift.common.version is not defined +- name: Update oreg_auth docker login credentials if necessary + include_role: + name: docker + tasks_from: registry_auth.yml + when: oreg_auth_user is defined + - name: Verify containers are available for upgrade command: > docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} @@ -37,7 +43,7 @@ fail: msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required" when: - - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<') + - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<') - name: Fail when openshift version does not meet minimum requirement for Origin upgrade fail: diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml deleted file mode 100644 index 8cc46ab68..000000000 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# When we update package "a-${version}" and a requires b >= ${version} if we -# don't specify the version of b yum will choose the latest version of b -# available and the whole set of dependencies end up at the latest version. -# Since the package module, unlike the yum module, doesn't flatten a list -# of packages into one transaction we need to do that explicitly. The ansible -# core team tells us not to rely on yum module transaction flattening anyway.
- -# TODO: If the sdn package isn't already installed this will install it, we -# should fix that -- name: Upgrade master packages - package: name={{ master_pkgs | join(',') }} state=present - vars: - master_pkgs: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - PyYAML - when: - - component == "master" - - not openshift.common.is_atomic | bool - -- name: Upgrade node packages - package: name={{ node_pkgs | join(',') }} state=present - vars: - node_pkgs: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - PyYAML - when: - - component == "node" - - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index c37a5f9ab..399b818a7 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,22 +3,6 @@ # Upgrade Masters ############################################################################### -# oc adm migrate storage should be run prior to etcd v3 upgrade -# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 -- name: Pre master upgrade - Upgrade all storage - hosts: oo_first_master - tasks: - - name: Upgrade all storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=* --confirm - register: l_pb_upgrade_control_plane_pre_upgrade_storage - when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - failed_when: - - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 - - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool - # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. This causes problems for the etcd data dir fact detection # so we must first make sure this is set correctly before attempting the backup. 
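The comment at the head of the deleted rpm_upgrade.yml above explains the single-transaction requirement: pinned packages must be joined into one `package` call so yum resolves the whole dependency set together instead of floating unpinned dependencies to their latest versions. A minimal sketch of that pattern follows; the `origin` service type and the `-3.7.0` pin are illustrative assumptions, not taken from the patch:

```
# Sketch of the single-transaction upgrade pattern described in the
# deleted rpm_upgrade.yml comment: every package carries the same version
# pin, and the list is joined into one comma-separated name so yum
# resolves the whole set in a single transaction.
- name: Upgrade master packages in one yum transaction
  package:
    name: "{{ master_pkgs | join(',') }}"
    state: present
  vars:
    openshift_pkg_version: "-3.7.0"   # assumed example pin
    master_pkgs:
      - "origin{{ openshift_pkg_version }}"
      - "origin-master{{ openshift_pkg_version }}"
      - "origin-node{{ openshift_pkg_version }}"
```

Passing one comma-joined name string is what forces the single transaction; looping `package` per item would let yum resolve each package independently.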
@@ -48,6 +32,22 @@ - include: create_service_signer_cert.yml +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade all storage + hosts: oo_first_master + tasks: + - name: Upgrade all storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=* --confirm + register: l_pb_upgrade_control_plane_pre_upgrade_storage + when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 + - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool + # Set openshift_master_facts separately. In order to reconcile # admission_config's, we currently must run openshift_master_facts and # then run openshift_facts. @@ -63,13 +63,9 @@ vars: openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 - handlers: - - include: ../../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - - lib_utils - post_tasks: + tasks: + - include_role: + name: openshift_facts # Run the pre-upgrade hook if defined: - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}" @@ -78,52 +74,9 @@ - include: "{{ openshift_master_upgrade_pre_hook }}" when: openshift_master_upgrade_pre_hook is defined - - include: rpm_upgrade.yml component=master - when: not openshift.common.is_containerized | bool - - - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml - - - include: upgrade_scheduler.yml - - - include: "{{ master_config_hook }}" - when: master_config_hook is defined - - - include_vars: ../../../../roles/openshift_master/vars/main.yml - - - name: Remove any legacy systemd units and update systemd units - include: ../../../../roles/openshift_master/tasks/systemd_units.yml - - - name: Check for ca-bundle.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - register: ca_bundle_stat - failed_when: false - - - name: Check for ca.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca.crt" - register: ca_crt_stat - failed_when: false - - - name: Migrate ca.crt to ca-bundle.crt - command: mv ca.crt ca-bundle.crt - args: - chdir: "{{ openshift.common.config_base }}/master" - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Link ca.crt to ca-bundle.crt - file: - src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - path: "{{ openshift.common.config_base }}/master/ca.crt" - state: link - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Update oreg value - yedit: - src: "{{ openshift.common.config_base }}/master/master-config.yaml" - key: 'imageConfig.format' - value: "{{ oreg_url | default(oreg_url_master) }}" - when: oreg_url is defined or oreg_url_master is defined + - include_role: + name: openshift_master + tasks_from: upgrade.yml # Run the upgrade hook prior to restarting services/system if defined: - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" @@ -150,7 +103,9 @@ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=clusterpolicies --confirm register: l_pb_upgrade_control_plane_post_upgrade_storage - when: 
openshift_upgrade_post_storage_migration_enabled | default(true) | bool + when: + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool + - openshift_version | version_compare('3.7','<') failed_when: - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 @@ -199,7 +154,7 @@ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --additive-only=true --confirm -o name register: reconcile_cluster_role_result - when: not openshift.common.version_gte_3_7 | bool + when: openshift_version | version_compare('3.7','<') changed_when: - reconcile_cluster_role_result.stdout != '' - reconcile_cluster_role_result.rc == 0 @@ -214,7 +169,7 @@ --exclude-groups=system:unauthenticated --exclude-users=system:anonymous --additive-only=true --confirm -o name - when: not openshift.common.version_gte_3_7 | bool + when: openshift_version | version_compare('3.7','<') register: reconcile_bindings_result changed_when: - reconcile_bindings_result.stdout != '' @@ -229,9 +184,10 @@ changed_when: - reconcile_jenkins_role_binding_result.stdout != '' - reconcile_jenkins_role_binding_result.rc == 0 - when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool) + when: + - openshift_version | version_compare('3.7','<') - - when: (openshift.common.version_gte_3_6 | bool) and (not openshift.common.version_gte_3_7 | bool) + - when: openshift_upgrade_target | version_compare('3.7','<') block: - name: Retrieve shared-resource-viewer oc_obj: @@ -250,7 +206,6 @@ - "'annotations' in objout['results']['results'][0]['metadata']" - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']" - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'" - - copy: src: "{{ item }}" dest: "/tmp/{{ item }}" @@ -268,6 +223,12 @@ - "/tmp/{{ __master_shared_resource_viewer_file }}" delete_after: true when: __shared_resource_viewer_protected is not defined + register: result + retries: 3 + delay: 5 + until: result.rc == 0 + ignore_errors: true + - name: Reconcile Security Context Constraints command: > diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml deleted file mode 100644 index 8558bf3e9..000000000 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ /dev/null @@ -1,173 +0,0 @@ ---- -# Upgrade predicates -- vars: - prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" - prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}" - default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}" - # older_predicates are the set of predicates that have previously been - # hard-coded into openshift_facts - older_predicates: - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: MaxEBSVolumeCount - - name: MaxGCEPDVolumeCount - - name: Region - argument: - serviceAffinity: - labels: - - region - - - name: MatchNodeSelector - - name: PodFitsResources - - name: 
PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: Region - argument: - serviceAffinity: - labels: - - region - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: Region - argument: - serviceAffinity: - labels: - - region - # older_predicates_no_region are the set of predicates that have previously - # been hard-coded into openshift_facts, with the Region predicate removed - older_predicates_no_region: - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: MaxEBSVolumeCount - - name: MaxGCEPDVolumeCount - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - block: - - # Handle case where openshift_master_scheduler_predicates is defined - - block: - - debug: - msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift; current defaults are: {{ openshift_master_scheduler_default_predicates }}" - when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] - - - debug: - msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" - when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates - when: openshift_master_scheduler_predicates | default(none) is not none - - # Handle cases where openshift_master_scheduler_predicates is not defined - - block: - - debug: - msg: "WARNING: existing scheduler config does not match previous known defaults; automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}" - when: - - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates - - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] - - - set_fact: - openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}" - when: - - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates - - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] - - - set_fact: - openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}" - when: - - openshift_master_scheduler_current_predicates != default_predicates_no_region - - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] - - when: openshift_master_scheduler_predicates | default(none) is none - - -# Upgrade priorities -- vars: - prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" - prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}" - default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}" - # older_priorities are the set of priorities that have previously been - # hard-coded into
openshift_facts - older_priorities: - - - name: LeastRequestedPriority - weight: 1 - - name: SelectorSpreadPriority - weight: 1 - - name: Zone - weight: 2 - argument: - serviceAntiAffinity: - label: zone - # older_priorities_no_zone are the set of priorities that have previously - # been hard-coded into openshift_facts, with the Zone priority removed - older_priorities_no_zone: - - - name: LeastRequestedPriority - weight: 1 - - name: SelectorSpreadPriority - weight: 1 - block: - - # Handle case where openshift_master_scheduler_priorities is defined - - block: - - debug: - msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift; current defaults are: {{ openshift_master_scheduler_default_priorities }}" - when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] - - - debug: - msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" - when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities - when: openshift_master_scheduler_priorities | default(none) is not none - - # Handle cases where openshift_master_scheduler_priorities is not defined - - block: - - debug: - msg: "WARNING: existing scheduler config does not match previous known defaults; automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}" - when: - - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities - - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] - - - set_fact: - openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}" - when: - - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities - - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] - - - set_fact: - openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}" - when: - - openshift_master_scheduler_current_priorities != default_priorities_no_zone - - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] - - when: openshift_master_scheduler_priorities | default(none) is none - - -# Update scheduler -- vars: - scheduler_config: - kind: Policy - apiVersion: v1 - predicates: "{{ openshift_upgrade_scheduler_predicates - | default(openshift_master_scheduler_current_predicates) }}" - priorities: "{{ openshift_upgrade_scheduler_priorities - | default(openshift_master_scheduler_current_priorities) }}" - block: - - name: Update scheduler config - copy: - content: "{{ scheduler_config | to_nice_json }}" - dest: "{{ openshift_master_scheduler_conf }}" - backup: true - when: > - openshift_upgrade_scheduler_predicates is defined or - openshift_upgrade_scheduler_priorities is defined diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml deleted file mode 100644 index 5e7a66171..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key:
'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst' - yaml_value: 400 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps' - yaml_value: 200 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst' - yaml_value: 600 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps' - yaml_value: 300 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' - yaml_value: service-signer.key - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml deleted file mode 100644 index 89b524f14..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.burst' - yaml_value: 40 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.qps' - yaml_value: 20 diff --git 
a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml deleted file mode 100644 index a241ef039..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml +++ /dev/null @@ -1,118 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
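# The "Cleanup unused Docker images" play that follows only includes ../cleanup_unused_images.yml;
# for context, pruning of this sort generally boils down to a task along these lines (an
# illustrative sketch only -- not the contents of that file; the docker and xargs flags are
# standard options):
#
#   - name: Remove dangling images (illustrative sketch)
#     shell: docker images -q -f dangling=true | xargs -r docker rmi
#     failed_when: false
#     changed_when: false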
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml deleted file mode 100644 index f64f0e003..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ /dev/null @@ -1,119 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
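# skip_docker_role (set just below) is a plain flag passed down through vars; a role honoring
# it would typically gate its entry point like this minimal sketch (the include itself is
# hypothetical, only the variable name is taken from this playbook):
#
#   - include_role:
#       name: docker
#     when: not (skip_docker_role | default(False) | bool)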
- skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml deleted file mode 100644 index cee4e9087..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ /dev/null @@ -1,113 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. 
At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml deleted file mode 100644 index 52458e03c..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' - yaml_value: service-signer.key diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml deleted file mode 100644 index ae217ba2e..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml +++ /dev/null @@ -1,116 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
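# The openshift_no_proxy_internal_hostnames set_fact above joins every node, master, and etcd
# hostname into one comma-separated string suitable for NO_PROXY; with two hypothetical hosts
# the result is shaped like:
#
#   openshift_no_proxy_internal_hostnames: "master1.example.com,node1.example.com"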
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../upgrade_nodes.yml - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml deleted file mode 100644 index 43da5b629..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ /dev/null @@ -1,119 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml deleted file mode 100644 index 8531e6045..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ /dev/null @@ -1,111 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. 
At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml deleted file mode 100644 index 52458e03c..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' - yaml_value: service-signer.key diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml deleted file mode 100644 index ae63c9ca9..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -############################################################################### -# Pre-upgrade checks for known data problems; if this playbook fails you should -# contact support. If you're not supported, contact users@lists.openshift.com -# -# oc_objectvalidator provides these two checks -# 1 - SDN Data issues, never seen in the wild but known possible due to code audits -# https://github.com/openshift/origin/issues/12697 -# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934 -# -############################################################################### -- name: Verify 3.5 specific upgrade checks - hosts: oo_first_master - roles: - - { role: lib_openshift } - tasks: - - name: Check for invalid namespaces and SDN errors - oc_objectvalidator: - - # What's all this PetSet business about? - # - # 'PetSets' were ALPHA resources in OCP <= 3.4 (Kubernetes 1.4). In OCP >= 3.5 they are - # no longer supported. The BETA resource 'StatefulSets' replaces - # them. We can't migrate clients' PetSets to - # StatefulSets. Additionally, Red Hat has never officially supported - # these resource types. Sorry users, but if you were using - # unsupported resources from the Kube documentation then we can't - # help you at this time.
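# The 'when:' conditions on the fail task further below inspect the registered oc_obj list
# result, which is shaped roughly as follows (a sketch inferred from those conditions; the
# values shown are illustrative):
#
#   l_do_petsets_exist:
#     results:
#       returncode: 0
#       results:
#       - items: []    # non-empty when legacy PetSets still exist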
- # - # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229 - - name: Check if legacy PetSets exist - oc_obj: - state: list - all_namespaces: true - kind: petsets - register: l_do_petsets_exist - - - name: Fail on unsupported resource migration 'PetSets' - fail: - msg: > - PetSet objects were detected in your cluster. These are an - Alpha feature in upstream Kubernetes 1.4 and are not supported - by Red Hat. In Kubernetes 1.5, they are replaced by the Beta - feature StatefulSets. Red Hat currently does not offer support - for either PetSets or StatefulSets. - - Automatically migrating PetSets to StatefulSets in OpenShift - Container Platform (OCP) 3.5 is not supported. See the - Kubernetes "Upgrading from PetSets to StatefulSets" - documentation for additional information: - - https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/ - - PetSets MUST be removed before upgrading to OCP 3.5. Red Hat - strongly recommends reading the above referenced documentation - in its entirety before taking any destructive actions. - - If you want to simply remove all PetSets without manually - migrating to StatefulSets, run this command as a user with - cluster-admin privileges: - - $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false - when: - # Search did not fail, valid resource type found - - l_do_petsets_exist.results.returncode == 0 - # Items do exist in the search results - - l_do_petsets_exist.results.results.0['items'] | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml deleted file mode 100644 index db0c8f886..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' - yaml_value: service-signer.key - -- modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: servingInfo.clientCA - yaml_value: ca.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 920dc2ffc..dd109cfa9 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -116,6 +116,8 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_6/master_config_upgrade.yml" - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 27d8515dc..8ab68002d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -76,7 +76,6 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended 
package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index bf3b94682..f4862e321 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -119,13 +119,13 @@ tasks: - include: ../cleanup_unused_images.yml -#TODO: Why doesn't this compose using ./upgrade_control_plane rather than -# ../upgrade_control_plane? - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode - hosts: oo_etcd_to_config + hosts: oo_masters_to_config gather_facts: no tasks: - name: Stop {{ openshift.common.service_type }}-master-controllers diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index b91bea617..b905d6d86 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -80,7 +80,6 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger @@ -130,7 +129,7 @@ # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode - hosts: oo_etcd_to_config + hosts: oo_masters_to_config gather_facts: no tasks: - name: Stop {{ openshift.common.service_type }}-master-controllers diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index f76fc68d1..74d0cd8ad 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -11,13 +11,15 @@ tasks: - name: Check for invalid namespaces and SDN errors oc_objectvalidator: - + # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO - name: Confirm OpenShift authorization objects are in sync command: > {{ openshift.common.client_binary }} adm migrate authorization - when: not openshift.common.version_gte_3_7 | bool + when: + - openshift_currently_installed_version | version_compare('3.7','<') + - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool changed_when: false register: l_oc_result until: l_oc_result.rc == 0 - retries: 4 + retries: 2 delay: 15 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins index 7de3c1dd7..7de3c1dd7 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml index 1d4d1919c..1d4d1919c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml +++ 
b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_8/roles index 415645be6..415645be6 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index 30e719d8f..b3162bd5f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -12,8 +12,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade @@ -21,6 +21,10 @@ tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos and initialize facts on all hosts hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config tags: @@ -47,6 +51,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade @@ -112,6 +120,22 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index e9cec9220..3df5b17b5 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -21,14 +21,18 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos on control plane hosts hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tags: @@ -55,6 +59,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade @@ -72,7 +80,6 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or 
config upgrades which trigger @@ -118,6 +125,20 @@ - include: ../upgrade_control_plane.yml vars: - master_config_hook: "v3_5/master_config_upgrade.yml" + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index e29d0f8e6..f3d192ba7 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -14,8 +14,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml @@ -48,6 +48,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../disable_node_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml new file mode 100644 index 000000000..d8540abfb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.8 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - debug: msg="noop" diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 48d46bbb0..3fe483785 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -1,13 +1,15 @@ --- - name: etcd Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set etcd install 'In Progress' + run_once: true set_stats: data: - installer_phase_etcd: "In Progress" - aggregate: false + installer_phase_etcd: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - include: ca.yml @@ -26,11 +28,13 @@ - role: nickhammond.logrotate - name: etcd Install Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set etcd install 'Complete' + run_once: true set_stats: data: - installer_phase_etcd: "Complete" - aggregate: false + installer_phase_etcd: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml index 9264f3c32..b16b78c4f 100644 --- a/playbooks/common/openshift-etcd/embedded2external.yml +++ b/playbooks/common/openshift-etcd/embedded2external.yml @@ -158,7 +158,7 @@ tasks_from: configure_external_etcd vars: etcd_peer_url_scheme: "https" - etcd_ip: "{{ openshift.common.ip }}" + etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}" etcd_peer_port: 2379 # 9. 
start the master diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 80cda9e21..19e14ab3e 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -1,13 +1,15 @@ --- - name: GlusterFS Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set GlusterFS install 'In Progress' + run_once: true set_stats: data: - installer_phase_glusterfs: "In Progress" - aggregate: false + installer_phase_glusterfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Open firewall ports for GlusterFS nodes hosts: glusterfs @@ -17,6 +19,11 @@ tasks_from: firewall.yml when: - openshift_storage_glusterfs_is_native | default(True) | bool + - include_role: + name: openshift_storage_glusterfs + tasks_from: kernel_modules.yml + when: + - openshift_storage_glusterfs_is_native | default(True) | bool - name: Open firewall ports for GlusterFS registry nodes hosts: glusterfs_registry @@ -26,6 +33,11 @@ tasks_from: firewall.yml when: - openshift_storage_glusterfs_registry_is_native | default(True) | bool + - include_role: + name: openshift_storage_glusterfs + tasks_from: kernel_modules.yml + when: + - openshift_storage_glusterfs_registry_is_native | default(True) | bool - name: Configure GlusterFS hosts: oo_first_master @@ -36,11 +48,13 @@ when: groups.oo_glusterfs_to_config | default([]) | count > 0 - name: GlusterFS Install Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set GlusterFS install 'Complete' + run_once: true set_stats: data: - installer_phase_glusterfs: "Complete" - aggregate: false + installer_phase_glusterfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml index 2a703cb61..d737b836b 100644 --- a/playbooks/common/openshift-loadbalancer/config.yml +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -1,13 +1,15 @@ --- - name: Load Balancer Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set load balancer install 'In Progress' + run_once: true set_stats: data: - installer_phase_loadbalancer: "In Progress" - aggregate: false + installer_phase_loadbalancer: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Configure firewall and docker for load balancers hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config @@ -37,11 +39,13 @@ - role: tuned - name: Load Balancer Install Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set load balancer install 'Complete' + run_once: true set_stats: data: - installer_phase_loadbalancer: "Complete" - aggregate: false + installer_phase_loadbalancer: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml new file mode 100644 index 000000000..facb3a5b9 --- /dev/null +++ b/playbooks/common/openshift-management/add_container_provider.yml @@ -0,0 +1,8 @@ +--- +- name: Add Container Provider to Management + hosts: oo_first_master + tasks: + - name: Run the Management Integration Tasks + include_role: + name: openshift_management + tasks_from: add_container_provider diff --git 
a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml index 908679e81..3f1cdf713 100644 --- a/playbooks/common/openshift-management/config.yml +++ b/playbooks/common/openshift-management/config.yml @@ -1,13 +1,15 @@ --- - name: Management Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Management install 'In Progress' + run_once: true set_stats: data: - installer_phase_management: "In Progress" - aggregate: false + installer_phase_management: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Setup CFME hosts: oo_first_master @@ -25,11 +27,13 @@ template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}" - name: Management Install Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Management install 'Complete' + run_once: true set_stats: data: - installer_phase_management: "Complete" - aggregate: false + installer_phase_management: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml index 698d93405..9f35cc276 100644 --- a/playbooks/common/openshift-management/uninstall.yml +++ b/playbooks/common/openshift-management/uninstall.yml @@ -1,6 +1,6 @@ --- - name: Uninstall CFME - hosts: masters + hosts: masters[0] tasks: - name: Run the CFME Uninstall Role Tasks include_role: diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml index e423d6ce9..32f638d42 100644 --- a/playbooks/common/openshift-master/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml @@ -1,13 +1,15 @@ --- - name: Master Additional Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Master Additional install 'In Progress' + run_once: true set_stats: data: - installer_phase_master_additional: "In Progress" - aggregate: false + installer_phase_master_additional: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Additional master configuration hosts: oo_first_master @@ -30,7 +32,7 @@ when: openshift_use_manageiq | default(true) | bool - role: cockpit when: - - openshift.common.is_atomic + - not openshift.common.is_atomic | bool - deployment_type == 'openshift-enterprise' - osm_use_cockpit is undefined or osm_use_cockpit | bool - openshift.common.deployment_subtype != 'registry' @@ -38,11 +40,13 @@ when: openshift_use_flannel | default(false) | bool - name: Master Additional Install Checkpoint End - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Master Additional install 'Complete' + run_once: true set_stats: data: - installer_phase_master_additional: "Complete" - aggregate: false + installer_phase_master_additional: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index b359919ba..6b0fd6b7c 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -1,13 +1,15 @@ --- - name: Master Install Checkpoint Start - hosts: oo_all_hosts + hosts: all gather_facts: false tasks: - name: Set Master install 'In Progress' + run_once: true set_stats: data: - installer_phase_master: "In Progress" - aggregate: false + 
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index b359919ba..6b0fd6b7c 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,13 +1,15 @@
 ---
 - name: Master Install Checkpoint Start
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Master install 'In Progress'
+    run_once: true
     set_stats:
       data:
-        installer_phase_master: "In Progress"
-        aggregate: false
+        installer_phase_master:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
 - include: certificates.yml
 
@@ -212,6 +214,12 @@
         tasks_from: master
       when: openshift_use_kuryr | default(false) | bool
 
+    - name: Setup the node group config maps
+      include_role:
+        name: openshift_node_group
+      when: openshift_master_bootstrap_enabled | default(false) | bool
+      run_once: True
+
   post_tasks:
   - name: Create group for deployment type
     group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
@@ -232,11 +240,13 @@
       r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
 
 - name: Master Install Checkpoint End
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Master install 'Complete'
+    run_once: true
     set_stats:
       data:
-        installer_phase_master: "Complete"
-        aggregate: false
+        installer_phase_master:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
deleted file mode 100644
index d0a9f11dc..000000000
--- a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
+++ /dev/null
@@ -1,2 +0,0 @@
-// empty file so that the master-config can still point to a file that exists
-// this file will be replaced by the template service broker role if enabled
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 4f8b758fd..4e1b3a3be 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -1,22 +1,4 @@
 ---
-- name: Restart master API
-  service:
-    name: "{{ openshift.common.service_type }}-master-api"
-    state: restarted
-  when: openshift_master_ha | bool
-- name: Wait for master API to come back online
-  wait_for:
-    host: "{{ openshift.common.hostname }}"
-    state: started
-    delay: 10
-    port: "{{ openshift.master.api_port }}"
-    timeout: 600
-  when: openshift_master_ha | bool
-- name: Restart master controllers
-  service:
-    name: "{{ openshift.common.service_type }}-master-controllers"
-    state: restarted
-  # Ignore errrors since it is possible that type != simple for
-  # pre-3.1.1 installations.
-  ignore_errors: true
-  when: openshift_master_ha | bool
+- include_role:
+    name: openshift_master
+    tasks_from: restart.yml
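restart_services.yml shrinks to a single `include_role` with `tasks_from`, so the restart logic lives in one task file inside the role instead of being duplicated across playbooks. The mechanism, sketched with hypothetical names:

    # roles/myrole/tasks/restart.yml would hold the restart tasks;
    # the playbook pulls in only that file, not the role's main.yml:
    - include_role:
        name: myrole              # hypothetical role
        tasks_from: restart.yml   # resolves to roles/myrole/tasks/restart.yml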
+- name: Set servingInfo.clientCA = ca.crt in master config
+  hosts: oo_masters_to_config
+  tasks:
+  - name: Read master config
+    slurp:
+      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    register: g_master_config_output
+
+  # servingInfo.clientCA may be set as the client-ca-bundle.crt from
+  # CA redeployment and this task reverts that change.
+  - name: Set servingInfo.clientCA = ca.crt in master config
+    modify_yaml:
+      dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+      yaml_key: servingInfo.clientCA
+      yaml_value: ca.crt
+    when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index f4dc9df8a..4c415ebce 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -22,16 +22,17 @@
   - name: restart master api
     service: name={{ openshift.common.service_type }}-master-controllers state=restarted
     notify: verify api server
+  # We retry the controllers because the API may not be 100% initialized yet.
   - name: restart master controllers
-    service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+    command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+    retries: 3
+    delay: 5
+    register: result
+    until: result.rc == 0
   - name: verify api server
     command: >
       curl --silent --tlsv1.2
-      {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
       --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-      {% else %}
-      --cacert {{ openshift.common.config_base }}/master/ca.crt
-      {% endif %}
       {{ openshift.master.api_url }}/healthz/ready
     args:
       # Disables the following warning:
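The scaleup hunk swaps the `service` module for a raw `systemctl` command so the restart can be retried: `until` re-runs the task, up to `retries` times with `delay` seconds between attempts, until the registered result exits cleanly. The same shape in isolation, with a hypothetical unit name:

    - name: restart a unit whose dependencies may not be up yet
      command: systemctl restart my-unit.service   # hypothetical unit
      register: result
      until: result.rc == 0    # keep retrying while the restart fails
      retries: 3
      delay: 5                 # seconds between attempts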
diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
index 560eea785..97acc5d5d 100644
--- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml
+++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
@@ -136,9 +136,15 @@
   when:
   - not front_proxy_kubeconfig.stat.exists
 
-- name: copy tech preview extension file for service console UI
-  copy:
-    src: openshift-ansible-catalog-console.js
+- name: Delete temp directory
+  file:
+    name: "{{ certtemp.stdout }}"
+    state: absent
+  changed_when: False
+
+- name: Setup extension file for service console UI
+  template:
+    src: ../templates/openshift-ansible-catalog-console.js
     dest: /etc/origin/master/openshift-ansible-catalog-console.js
 
 - name: Update master config
@@ -179,8 +185,13 @@
   - yedit_output.changed
   - openshift.master.cluster_method == 'native'
 
+# We retry the controllers because the API may not be 100% initialized yet.
 - name: restart master controllers
-  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
   when:
   - yedit_output.changed
   - openshift.master.cluster_method == 'native'
@@ -190,11 +201,7 @@
   # wait_for port doesn't provide health information.
   command: >
     curl --silent --tlsv1.2
-    {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
     --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-    {% else %}
-    --cacert {{ openshift.common.config_base }}/master/ca.crt
-    {% endif %}
     {{ openshift.master.api_url }}/healthz/ready
   args:
     # Disables the following warning:
@@ -207,9 +214,3 @@
   changed_when: false
   when:
   - yedit_output.changed
-
-- name: Delete temp directory
-  file:
-    name: "{{ certtemp.stdout }}"
-    state: absent
-  changed_when: False
diff --git a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..fd02325ba
--- /dev/null
+++ b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js
@@ -0,0 +1 @@
+window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }};
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index ce672daf5..6ea77e00b 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,13 +1,15 @@
 ---
 - name: NFS Install Checkpoint Start
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set NFS install 'In Progress'
+    run_once: true
     set_stats:
       data:
-        installer_phase_nfs: "In Progress"
-        aggregate: false
+        installer_phase_nfs:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
 - name: Configure nfs
   hosts: oo_nfs_to_config
@@ -16,11 +18,13 @@
   - role: openshift_storage_nfs
 
 - name: NFS Install Checkpoint End
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set NFS install 'Complete'
+    run_once: true
     set_stats:
       data:
-        installer_phase_nfs: "Complete"
-        aggregate: false
+        installer_phase_nfs:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml
new file mode 100644
index 000000000..38753d0af
--- /dev/null
+++ b/playbooks/common/openshift-node/clean_image.yml
@@ -0,0 +1,10 @@
+---
+- name: Configure nodes
+  hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+  tasks:
+  - name: Remove any ansible facts created during AMI creation
+    file:
+      path: "/etc/ansible/facts.d/{{ item }}"
+      state: absent
+    with_items:
+    - openshift.fact
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 4f8f98aef..28e3c1b1b 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,13 +1,15 @@
 ---
 - name: Node Install Checkpoint Start
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Node install 'In Progress'
+    run_once: true
     set_stats:
       data:
-        installer_phase_node: "In Progress"
-        aggregate: false
+        installer_phase_node:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
 - include: certificates.yml
 
@@ -24,11 +26,13 @@
 - include: enable_excluders.yml
 
 - name: Node Install Checkpoint End
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Node install 'Complete'
+    run_once: true
     set_stats:
       data:
-        installer_phase_node: "Complete"
-        aggregate: false
+        installer_phase_node:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
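The one-line template added above replaces the static, empty extension file: Jinja2 evaluates the install flag at deploy time and emits a JavaScript boolean literal into the console extension. A sketch of how the rendering task behaves, using the src/dest from the wire_aggregator hunk:

    # with template_service_broker_install left at its default (True),
    # the rendered file would read:
    #   window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = true;
    - template:
        src: ../templates/openshift-ansible-catalog-console.js
        dest: /etc/origin/master/openshift-ansible-catalog-console.js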
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
index 00d167c22..30651a1df 100644
--- a/playbooks/common/openshift-node/image_prep.yml
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -19,3 +19,6 @@
 
 - name: Re-enable excluders
   include: enable_excluders.yml
+
+- name: Remove any undesired artifacts from build
+  include: clean_image.yml
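image_prep.yml now ends by including clean_image.yml: a `/etc/ansible/facts.d/openshift.fact` written while building the image would otherwise be baked into the AMI and feed stale facts to every instance booted from it. A hypothetical guard one could run before sealing an image, to the same end:

    - name: fail the image build if local facts survived cleanup
      hosts: oo_nodes_to_config
      tasks:
      - stat:
          path: /etc/ansible/facts.d/openshift.fact
        register: leftover_fact
      - fail:
          msg: "openshift.fact still present; image would carry stale facts"
        when: leftover_fact.stat.exists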