Diffstat (limited to 'playbooks')
161 files changed, 1154 insertions, 1869 deletions
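Most of the changes below follow two recurring patterns: deprecated play-level `include:` directives are converted to `import_playbook:` (or `include_tasks:`/`import_role:` at task level), and `package:` tasks gain `register`/`until` so transient package-manager failures are retried rather than failing the run. A minimal sketch of both patterns in Ansible YAML follows; the playbook and package names are placeholders, not files from this repository:

```yaml
---
# Static composition: `include:` at play level (deprecated in Ansible 2.4)
# becomes `import_playbook:`, which is resolved at parse time.
- import_playbook: sub_playbook.yml   # placeholder name
  vars:
    example_var: example_value

# Package tasks are retried on transient failures via register/until,
# mirroring the idiom added throughout this diff.
- hosts: all
  tasks:
    - name: Remove example package
      package:
        name: example-package          # placeholder name
        state: absent
      register: result
      until: result | success
```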
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml index 44a2ef534..69b2541bb 100644 --- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml +++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml @@ -8,7 +8,7 @@ hosts: masters:!masters[0] pre_tasks: - set_fact: - openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" + openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain }}" tasks: - include_role: name: openshift_logging diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 5ed55a817..9f044c089 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -62,7 +62,6 @@ - origin-master - origin-master-api - origin-master-controllers - - pcsd failed_when: false - hosts: etcd @@ -124,12 +123,16 @@ - origin-clients - origin-node - origin-sdn-ovs - - tuned-profiles-openshift-node + - tuned-profiles-atomic-openshift-node - tuned-profiles-origin-node + register: result + until: result | success - name: Remove flannel package package: name=flannel state=absent when: openshift_use_flannel | default(false) | bool + register: result + until: result | success when: not is_atomic | bool - shell: systemctl reset-failed @@ -380,8 +383,8 @@ - origin-excluder - origin-docker-excluder - origin-master - - pacemaker - - pcs + register: result + until: result | success - shell: systemctl reset-failed changed_when: False @@ -450,8 +453,6 @@ - /etc/sysconfig/origin-master-api - /etc/sysconfig/origin-master-controllers - /usr/share/openshift/examples - - /var/lib/pacemaker - - /var/lib/pcsd - /usr/lib/systemd/system/atomic-openshift-master-api.service - /usr/lib/systemd/system/atomic-openshift-master-controllers.service - /usr/lib/systemd/system/origin-master-api.service @@ -497,6 +498,8 @@ with_items: - etcd - etcd3 + register: result + until: result | success - shell: systemctl reset-failed changed_when: False @@ -554,6 +557,8 @@ when: not is_atomic | bool and openshift_remove_all | default(True) | bool with_items: - haproxy + register: result + until: result | success - shell: systemctl reset-failed changed_when: False diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index 417fb539a..d203b9cda 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -75,7 +75,7 @@ If customization is required for the instances, scale groups, or any other confi In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as deploying a cluster using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example. -There are more examples of cluster inventory settings [`here`](../../inventory/byo/). +There are more examples of cluster inventory settings [`here`](../../inventory/). #### Step 0 (optional) @@ -134,11 +134,11 @@ At this point we have successfully created the infrastructure including the mast Now it is time to install Openshift using the openshift-ansible installer. 
This can be achieved by running the following playbook: ``` -$ ansible-playbook -i inventory.yml install.yml @provisioning_vars.yml +$ ansible-playbook -i inventory.yml install.yml -e @provisioning_vars.yml ``` This playbook accomplishes the following: 1. Builds a dynamic inventory file by querying AWS. -2. Runs the [`byo`](../../common/openshift-cluster/config.yml) +2. Runs the [`deploy_cluster.yml`](../deploy_cluster.yml) Once this playbook completes, the cluster masters should be installed and configured. diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml index c2c8bea50..e7bed4f6e 100755 --- a/playbooks/aws/openshift-cluster/accept.yml +++ b/playbooks/aws/openshift-cluster/accept.yml @@ -14,11 +14,11 @@ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - name: bring lib_openshift into scope - include_role: + import_role: name: lib_openshift - name: fetch masters - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region | default('us-east-1') }}" filters: "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" @@ -30,7 +30,7 @@ until: "'instances' in mastersout and mastersout.instances|length > 0" - name: fetch new node instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region | default('us-east-1') }}" filters: "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml index 5815c4975..5bf4f652a 100644 --- a/playbooks/aws/openshift-cluster/build_ami.yml +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -17,7 +17,7 @@ - name: openshift_aws_region msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" -- include: provision_instance.yml +- import_playbook: provision_instance.yml vars: openshift_aws_node_group_type: compute @@ -33,8 +33,8 @@ # This is the part that installs all of the software and configs for the instance # to become a node. 
-- include: ../../openshift-node/private/image_prep.yml +- import_playbook: ../../openshift-node/private/image_prep.yml -- include: seal_ami.yml +- import_playbook: seal_ami.yml vars: openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml index da7ec9d21..9d9ed29de 100644 --- a/playbooks/aws/openshift-cluster/hosted.yml +++ b/playbooks/aws/openshift-cluster/hosted.yml @@ -1,19 +1,19 @@ --- -- include: ../../openshift-hosted/private/config.yml +- import_playbook: ../../openshift-hosted/private/config.yml -- include: ../../openshift-metrics/private/config.yml +- import_playbook: ../../openshift-metrics/private/config.yml when: openshift_metrics_install_metrics | default(false) | bool -- include: ../../common/openshift-cluster/openshift_logging.yml +- import_playbook: ../../openshift-logging/private/config.yml when: openshift_logging_install_logging | default(false) | bool -- include: ../../openshift-prometheus/private/config.yml +- import_playbook: ../../openshift-prometheus/private/config.yml when: openshift_hosted_prometheus_deploy | default(false) | bool -- include: ../../openshift-service-catalog/private/config.yml +- import_playbook: ../../openshift-service-catalog/private/config.yml when: openshift_enable_service_catalog | default(false) | bool -- include: ../../openshift-management/private/config.yml +- import_playbook: ../../openshift-management/private/config.yml when: openshift_management_install_management | default(false) | bool - name: Print deprecated variable warning message if necessary diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml index f8206529a..b03fb0b7f 100644 --- a/playbooks/aws/openshift-cluster/install.yml +++ b/playbooks/aws/openshift-cluster/install.yml @@ -16,31 +16,31 @@ tasks_from: master_facts.yml - name: run the init - include: ../../init/main.yml + import_playbook: ../../init/main.yml - name: perform the installer openshift-checks - include: ../../openshift-checks/private/install.yml + import_playbook: ../../openshift-checks/private/install.yml - name: etcd install - include: ../../openshift-etcd/private/config.yml + import_playbook: ../../openshift-etcd/private/config.yml - name: include nfs - include: ../../openshift-nfs/private/config.yml + import_playbook: ../../openshift-nfs/private/config.yml when: groups.oo_nfs_to_config | default([]) | count > 0 - name: include loadbalancer - include: ../../openshift-loadbalancer/private/config.yml + import_playbook: ../../openshift-loadbalancer/private/config.yml when: groups.oo_lb_to_config | default([]) | count > 0 - name: include openshift-master config - include: ../../openshift-master/private/config.yml + import_playbook: ../../openshift-master/private/config.yml - name: include master additional config - include: ../../openshift-master/private/additional_config.yml + import_playbook: ../../openshift-master/private/additional_config.yml - name: include master additional config - include: ../../openshift-node/private/config.yml + import_playbook: ../../openshift-node/private/config.yml - name: include openshift-glusterfs - include: ../../openshift-glusterfs/private/config.yml + import_playbook: ../../openshift-glusterfs/private/config.yml when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml index 
f5eb01b14..0afcce331 100644 --- a/playbooks/aws/openshift-cluster/prerequisites.yml +++ b/playbooks/aws/openshift-cluster/prerequisites.yml @@ -1,6 +1,6 @@ --- -- include: provision_vpc.yml +- import_playbook: provision_vpc.yml -- include: provision_ssh_keypair.yml +- import_playbook: provision_ssh_keypair.yml -- include: provision_sec_group.yml +- import_playbook: provision_sec_group.yml diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml index 78dd6a49b..f98f5be9a 100644 --- a/playbooks/aws/openshift-cluster/provision_install.yml +++ b/playbooks/aws/openshift-cluster/provision_install.yml @@ -4,16 +4,16 @@ # this playbook is run with the following parameters: # ansible-playbook -i openshift-ansible-inventory provision_install.yml - name: Include the provision.yml playbook to create cluster - include: provision.yml + import_playbook: provision.yml - name: Include the install.yml playbook to install cluster on masters - include: install.yml + import_playbook: install.yml - name: provision the infra/compute playbook to install node resources - include: provision_nodes.yml + import_playbook: provision_nodes.yml - name: Include the accept.yml playbook to accept nodes into the cluster - include: accept.yml + import_playbook: accept.yml - name: Include the hosted.yml playbook to finish the hosted configuration - include: hosted.yml + import_playbook: hosted.yml diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini index 238a7eb2f..cf76c9d10 100644 --- a/playbooks/aws/provisioning-inventory.example.ini +++ b/playbooks/aws/provisioning-inventory.example.ini @@ -11,6 +11,7 @@ etcd openshift_deployment_type=origin openshift_master_bootstrap_enabled=True +openshift_master_api_port=443 openshift_hosted_router_wait=False openshift_hosted_registry_wait=False diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml deleted file mode 100644 index 7d03914a2..000000000 --- a/playbooks/byo/config.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include: openshift-cluster/config.yml diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml deleted file mode 100644 index 57823847b..000000000 --- a/playbooks/byo/openshift-cluster/config.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/config.yml diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml deleted file mode 100644 index 74e186f33..000000000 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# -# This playbook is a preview of upcoming changes for installing -# Hosted logging on. See inventory/byo/hosts.*.example for the -# currently supported method. 
-# -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/openshift_logging.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml deleted file mode 100644 index c26f11772..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml - vars: - g_check_expiry_hosts: 'oo_etcd_to_config' - -- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml - -- include: ../../openshift-etcd/private/certificates.yml - vars: - etcd_certificates_redeploy: true - -- include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml - -- include: ../../openshift-master/private/certificates.yml - vars: - openshift_certificates_redeploy: true - -- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml - -- include: ../../openshift-node/private/certificates.yml - vars: - openshift_certificates_redeploy: true - -- include: ../../openshift-etcd/private/restart.yml - vars: - g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" - -- include: ../../openshift-master/private/restart.yml - -- include: ../../openshift-node/private/restart.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/router.yml - when: openshift_hosted_manage_router | default(true) | bool - -- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml - when: openshift_hosted_manage_registry | default(true) | bool - -- include: ../../openshift-master/private/revert-client-ca.yml - -- include: ../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml deleted file mode 100644 index 77dd121b3..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml deleted file mode 100644 index 94e50cc28..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml - vars: - g_check_expiry_hosts: 'oo_etcd_to_config' - -- include: ../../common/openshift-cluster/redeploy-certificates/etcd-backup.yml - -- include: ../../openshift-etcd/private/certificates.yml - vars: - etcd_certificates_redeploy: true - -- include: ../../openshift-etcd/private/restart.yml - vars: - g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" - -- include: ../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml deleted file mode 100644 index 88e52f809..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- include: ../../init/main.yml - -- 
include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml - -- include: ../../openshift-master/private/certificates.yml - vars: - openshift_certificates_redeploy: true - -- include: ../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml deleted file mode 100644 index 2abbe78f1..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml - -- include: ../../openshift-node/private/certificates.yml - vars: - openshift_certificates_redeploy: true - -- include: ../../openshift-node/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml deleted file mode 100644 index 9cebeb1ee..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml deleted file mode 100644 index 36b6250a7..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml deleted file mode 100644 index 181e03381..000000000 --- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/redeploy-certificates/router.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml index c46b22331..76308465c 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -1,5 +1,5 @@ --- # Playbook to upgrade Docker to the max allowable version for an OpenShift cluster. 
-- include: ../../../../init/evaluate_groups.yml +- import_playbook: ../../../../init/evaluate_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml deleted file mode 100644 index a9be8dec4..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../../init/evaluate_groups.yml - -- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml index c880fe7f7..0effc68bf 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -2,4 +2,4 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index aeec5f5cc..ebced5413 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -11,4 +11,4 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # -- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 4664a9a2b..f2e97fc01 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -4,4 +4,4 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # -- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml index cbb89bc4d..f6fedfdff 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -2,4 +2,4 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index 1adfbdec0..b8b5f5762 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -11,4 +11,4 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
# -- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index b4da18281..c63f11b30 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -4,4 +4,4 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # -- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml new file mode 100644 index 000000000..23a3fcbb5 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml @@ -0,0 +1,7 @@ +--- +# +# Node Scale Group Upgrade Playbook +# +# Upgrades scale group nodes only. +# +- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md index d9be6ae3b..815e49c28 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md @@ -1,4 +1,4 @@ -# v3.6 Major and Minor Upgrade Playbook +# v3.8 Major and Minor Upgrade Playbook ## Overview This playbook currently performs the following steps. diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml index f7e5dd1d2..c4094aa7e 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -2,4 +2,4 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index cc04d81c1..5a3aa6288 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -11,4 +11,4 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # -- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index 37a9f69bb..74981cc31 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -4,4 +4,4 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. 
# -- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md new file mode 100644 index 000000000..0ab3d3a52 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/README.md @@ -0,0 +1,20 @@ +# v3.9 Major and Minor Upgrade Playbook + +## Overview +This playbook currently performs the following steps. + + * Upgrade and restart master services + * Unschedule node + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml new file mode 100644 index 000000000..a2a9d59f2 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml @@ -0,0 +1,5 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml new file mode 100644 index 000000000..869e185af --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -0,0 +1,14 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml new file mode 100644 index 000000000..a5867434b --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml @@ -0,0 +1,7 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml deleted file mode 100644 index ca09fb65c..000000000 --- a/playbooks/byo/openshift-node/network_manager.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated. 
-- include: ../../openshift-node/network_manager.yml diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index 29e0ebe8d..85a65b7e1 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -1,13 +1,12 @@ --- -- include: ../init/main.yml +- import_playbook: ../init/main.yml - name: Gather Cluster facts hosts: oo_all_hosts roles: - openshift_facts tasks: - - openshift_facts: - openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}" + - openshift_facts: {} register: result - debug: var: result diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 261143080..5a877809a 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -1,5 +1,5 @@ --- -- include: ../init/evaluate_groups.yml +- import_playbook: ../init/evaluate_groups.yml - name: Subscribe hosts, update repos and update OS packages hosts: oo_all_hosts diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml deleted file mode 100644 index 2eeb81b86..000000000 --- a/playbooks/common/openshift-cluster/config.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -- include: ../../openshift-checks/private/install.yml - -- include: ../../openshift-etcd/private/config.yml - -- include: ../../openshift-nfs/private/config.yml - when: groups.oo_nfs_to_config | default([]) | count > 0 - -- include: ../../openshift-loadbalancer/private/config.yml - when: groups.oo_lb_to_config | default([]) | count > 0 - -- include: ../../openshift-master/private/config.yml - -- include: ../../openshift-master/private/additional_config.yml - -- include: ../../openshift-node/private/config.yml - -- include: ../../openshift-glusterfs/private/config.yml - when: groups.oo_glusterfs_to_config | default([]) | count > 0 - -- include: ../../openshift-hosted/private/config.yml - -- include: ../../openshift-metrics/private/config.yml - when: openshift_metrics_install_metrics | default(false) | bool - -- include: openshift_logging.yml - when: openshift_logging_install_logging | default(false) | bool - -- include: ../../openshift-prometheus/private/config.yml - when: openshift_hosted_prometheus_deploy | default(false) | bool - -- include: ../../openshift-service-catalog/private/config.yml - when: openshift_enable_service_catalog | default(true) | bool - -- include: ../../openshift-management/private/config.yml - when: openshift_management_install_management | default(false) | bool - -- name: Print deprecated variable warning message if necessary - hosts: oo_first_master - gather_facts: no - tasks: - - debug: msg="{{__deprecation_message}}" - when: - - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins deleted file mode 120000 index b1213dedb..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/library b/playbooks/common/openshift-cluster/redeploy-certificates/library deleted file mode 120000 index 9a53f009d..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/library +++ /dev/null @@ -1 +0,0 @@ -../../../../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins deleted file mode 120000 index aff753026..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/roles b/playbooks/common/openshift-cluster/redeploy-certificates/roles deleted file mode 120000 index 4bdbcbad3..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml index 6e953be69..ed97d539c 100644 --- a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml +++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml @@ -1,22 +1 @@ --- -- name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- debug: var=docker_image_count.stdout - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- name: Remove unused Docker images for Docker 1.10+ migration - shell: "docker rmi `docker images -aq`" - # Will fail on images still in use: - failed_when: false - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- debug: var=docker_image_count.stdout - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml index 800621857..858912379 100644 --- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml +++ b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml @@ -1,11 +1,10 @@ --- - name: Disable excluders - hosts: oo_masters_to_config + hosts: "{{ l_upgrade_excluder_hosts }}" gather_facts: no roles: - role: openshift_excluder r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" r_openshift_excluder_verify_upgrade: true r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}" r_openshift_excluder_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml deleted file mode 100644 index a66301c0d..000000000 --- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Disable excluders - hosts: oo_nodes_to_upgrade:!oo_masters_to_config - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - r_openshift_excluder_verify_upgrade: true - r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}" - r_openshift_excluder_package_state: latest - r_openshift_excluder_docker_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 6d4ddf011..38aa9df47 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -1,11 +1,11 @@ --- -- include: ../../../../init/evaluate_groups.yml +- import_playbook: ../../../../init/evaluate_groups.yml vars: # Do not allow adding hosts during upgrade. 
g_new_master_hosts: [] g_new_node_hosts: [] -- include: ../initialize_nodes_to_upgrade.yml +- import_playbook: ../initialize_nodes_to_upgrade.yml - name: Check for appropriate Docker versions hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config @@ -19,7 +19,9 @@ msg: Cannot upgrade Docker on Atomic operating systems. when: openshift.common.is_atomic | bool - - include: upgrade_check.yml + - include_role: + name: container_runtime + tasks_from: docker_upgrade_check.yml when: docker_upgrade is not defined or docker_upgrade | bool @@ -32,6 +34,7 @@ any_errors_fatal: true roles: + - openshift_facts - lib_openshift tasks: @@ -51,7 +54,7 @@ - name: Drain Node for Kubelet upgrade command: > - {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade register: l_docker_upgrade_drain_result @@ -59,7 +62,7 @@ retries: 60 delay: 60 - - include: tasks/upgrade.yml + - include_tasks: tasks/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool - name: Set node schedulability diff --git a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh deleted file mode 100644 index 8635eab0d..000000000 --- a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# Stop any running containers -running_container_ids=`docker ps -q` -if test -n "$running_container_ids" -then - docker stop $running_container_ids -fi - -# Delete all containers -container_ids=`docker ps -a -q` -if test -n "$container_ids" -then - docker rm -f -v $container_ids -fi - -# Delete all images (forcefully) -image_ids=`docker images -aq` -if test -n "$image_ids" -then - # Some layers are deleted recursively and are no longer present - # when docker goes to remove them: - docker rmi -f `docker images -aq` || true -fi - diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml index 83f16ac0d..dbc4f39c7 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml @@ -6,18 +6,14 @@ retries: 3 delay: 30 -- name: Update docker facts - openshift_facts: - role: docker - - name: Restart containerized services service: name={{ item }} state=started with_items: - etcd_container - openvswitch - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" + - "{{ openshift_service_type }}-master-api" + - "{{ openshift_service_type }}-master-controllers" + - "{{ openshift_service_type }}-node" failed_when: false when: openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml index 808cc562c..4856a4b51 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml +++ 
b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml @@ -4,9 +4,9 @@ - name: Stop containerized services service: name={{ item }} state=stopped with_items: - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" + - "{{ openshift_service_type }}-master-api" + - "{{ openshift_service_type }}-master-controllers" + - "{{ openshift_service_type }}-node" - etcd_container - openvswitch failed_when: false @@ -41,6 +41,8 @@ - name: Upgrade Docker package: name=docker{{ '-' + docker_version }} state=present + register: result + until: result | success -- include: restart.yml +- include_tasks: restart.yml when: not skip_docker_restart | default(False) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml index 2e3a7ae8b..ed97d539c 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml @@ -1,58 +1 @@ --- - -# This snippet determines if a Docker upgrade is required by checking the inventory -# variables, the available packages, and sets l_docker_upgrade to True if so. - -- set_fact: - docker_upgrade: True - when: docker_upgrade is not defined - -- name: Check if Docker is installed - command: rpm -q docker - args: - warn: no - register: pkg_check - failed_when: pkg_check.rc > 1 - changed_when: no - -- name: Get current version of Docker - command: "{{ repoquery_installed }} --qf '%{version}' docker" - register: curr_docker_version - retries: 4 - until: curr_docker_version | succeeded - changed_when: false - -- name: Get latest available version of Docker - command: > - {{ repoquery_cmd }} --qf '%{version}' "docker" - register: avail_docker_version - retries: 4 - until: avail_docker_version | succeeded - # Don't expect docker rpm to be available on hosts that don't already have it installed: - when: pkg_check.rc == 0 - failed_when: false - changed_when: false - -- fail: - msg: This playbook requires access to Docker 1.12 or later - # Disable the 1.12 requirement if the user set a specific Docker version - when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<'))) - -# Default l_docker_upgrade to False, we'll set to True if an upgrade is required: -- set_fact: - l_docker_upgrade: False - -# Make sure a docker_version is set if none was requested: -- set_fact: - docker_version: "{{ avail_docker_version.stdout }}" - when: pkg_check.rc == 0 and docker_version is not defined - -- name: Flag for Docker upgrade if necessary - set_fact: - l_docker_upgrade: True - when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<') - -- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary - set_fact: - docker_upgrade_nuke_images: True - when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=') diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins deleted file mode 120000 index 27ddaa18b..000000000 --- 
a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins deleted file mode 120000 index cf407f69b..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 9981d905b..8ee83819e 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -1,14 +1,14 @@ --- -- include: ../../../init/evaluate_groups.yml +- import_playbook: ../../../init/evaluate_groups.yml vars: # Do not allow adding hosts during upgrade. g_new_master_hosts: [] g_new_node_hosts: [] -- include: ../../../init/facts.yml +- import_playbook: ../../../init/facts.yml - name: Ensure firewall is not switched during upgrade - hosts: oo_all_hosts + hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}" vars: openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}" tasks: diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 3aa9e0460..344ddea3c 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -14,8 +14,9 @@ pre_tasks: - name: Load lib_openshift modules - include_role: + import_role: name: lib_openshift + - name: Collect all routers oc_obj: state: list @@ -113,7 +114,6 @@ roles: - role: openshift_excluder r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" post_tasks: # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond: - name: grep pluginOrderOverride diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml new file mode 100644 index 000000000..d5b82d9a0 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml @@ -0,0 +1,77 @@ +--- + +# Pre-upgrade +- import_playbook: ../initialize_nodes_to_upgrade.yml + +- import_playbook: verify_cluster.yml + +- name: Update repos on upgrade hosts + hosts: "{{ l_upgrade_repo_hosts }}" + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: "{{ l_upgrade_no_proxy_hosts }}" + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- name: OpenShift Health Checks + hosts: "{{ l_upgrade_health_check_hosts }}" + any_errors_fatal: true + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: upgrade + post_tasks: + - name: Run health checks (upgrade) + action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - docker_image_availability + +- import_playbook: ../disable_excluders.yml + +- import_playbook: ../../../../init/version.yml + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + 
openshift_protect_installed_version: False + +# If we're only upgrading nodes, we need to ensure masters are already upgraded +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: + - l_upgrade_nodes_only | default(False) | bool + - openshift.common.version != openshift_version + +# If we're only upgrading nodes, skip this. +- import_playbook: ../../../../openshift-master/private/validate_restart.yml + when: not (l_upgrade_nodes_only | default(False)) | bool + +- name: Verify upgrade targets + hosts: "{{ l_upgrade_verify_targets_hosts }}" + roles: + - role: openshift_facts + tasks: + - include_tasks: verify_upgrade_targets.yml + +- name: Verify docker upgrade targets + hosts: "{{ l_upgrade_docker_target_hosts }}" + tasks: + - include_role: + name: container_runtime + tasks_from: docker_upgrade_check.yml diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml deleted file mode 100644 index 8ecae4539..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Flag pre-upgrade checks complete for hosts without errors - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - set_fact: - pre_upgrade_complete: True diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml deleted file mode 100644 index 6d8503879..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# Only check if docker upgrade is required if docker_upgrade is not -# already set to False. -- include: ../../docker/upgrade_check.yml - when: - - docker_upgrade is not defined or (docker_upgrade | bool) - - not (openshift.common.is_atomic | bool) - -# Additional checks for Atomic hosts: - -- name: Determine available Docker - shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" - register: g_atomic_docker_version_result - when: openshift.common.is_atomic | bool - -- set_fact: - l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" - when: openshift.common.is_atomic | bool - -- fail: - msg: This playbook requires access to Docker 1.12 or later - when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<') diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml new file mode 100644 index 000000000..2ab9f852c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml @@ -0,0 +1,93 @@ +--- +# Verify a few items before we proceed with upgrade process. + +- name: Verify upgrade can proceed on first master + hosts: oo_first_master + gather_facts: no + tasks: + - fail: + msg: > + This upgrade is only supported for origin and openshift-enterprise + deployment types + when: deployment_type not in ['origin','openshift-enterprise'] + + # Error out in situations where the user has older versions specified in their + # inventory in any of the openshift_release, openshift_image_tag, and + # openshift_pkg_version variables. 
These must be removed or updated to proceed + # with upgrade. + # TODO: Should we block if you're *over* the next major release version as well? + - fail: + msg: > + openshift_pkg_version is {{ openshift_pkg_version }} which is not a + valid version for a {{ openshift_upgrade_target }} upgrade + when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<') + + - fail: + msg: > + openshift_image_tag is {{ openshift_image_tag }} which is not a + valid version for a {{ openshift_upgrade_target }} upgrade + when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<') + + - set_fact: + openshift_release: "{{ openshift_release[1:] }}" + when: openshift_release is defined and openshift_release[0] == 'v' + + - fail: + msg: > + openshift_release is {{ openshift_release }} which is not a + valid release for a {{ openshift_upgrade_target }} upgrade + when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=') + +- name: Verify master processes + hosts: oo_masters_to_config + roles: + - lib_utils + - openshift_facts + tasks: + - name: Read master storage backend setting + yedit: + state: list + src: /etc/origin/master/master-config.yaml + key: kubernetesMasterConfig.apiServerArguments.storage-backend + register: _storage_backend + + - fail: + msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue" + when: + # assuming the master-config.yml is properly configured, i.e. the value is a list + - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3" + + - debug: + msg: "Storage backend is set to etcd3" + + - openshift_facts: + role: master + local_facts: + ha: "{{ groups.oo_masters_to_config | length > 1 }}" + + - when: openshift.common.is_containerized | bool + block: + - set_fact: + master_services: + - "{{ openshift_service_type }}-master" + + # In case of the non-ha to ha upgrade. + - name: Check if the {{ openshift_service_type }}-master-api.service exists + command: > + systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend + register: master_api_service_status + + - set_fact: + master_services: + - "{{ openshift_service_type }}-master-api" + - "{{ openshift_service_type }}-master-controllers" + when: + - master_api_service_status.stdout_lines | length > 0 + - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0] + + - name: Ensure Master is running + service: + name: "{{ item }}" + state: started + enabled: yes + with_items: "{{ master_services }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml deleted file mode 100644 index 6a5bc24f7..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- name: Verify master processes - hosts: oo_masters_to_config - roles: - - openshift_facts - tasks: - - openshift_facts: - role: master - local_facts: - ha: "{{ groups.oo_masters_to_config | length > 1 }}" - - - when: openshift.common.is_containerized | bool - block: - - set_fact: - master_services: - - "{{ openshift.common.service_type }}-master" - - # In case of the non-ha to ha upgrade. 
- - name: Check if the {{ openshift.common.service_type }}-master-api.service exists - command: > - systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend - register: master_api_service_status - - - set_fact: - master_services: - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - when: - - master_api_service_status.stdout_lines | length > 0 - - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0] - - - name: Ensure Master is running - service: - name: "{{ item }}" - state: started - enabled: yes - with_items: "{{ master_services }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml deleted file mode 100644 index f75ae3b15..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Verify all masters has etcd3 storage backend set - hosts: oo_masters_to_config - gather_facts: no - roles: - - lib_utils - tasks: - - name: Read master storage backend setting - yedit: - state: list - src: /etc/origin/master/master-config.yaml - key: kubernetesMasterConfig.apiServerArguments.storage-backend - register: _storage_backend - - - fail: - msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue" - when: - # assuming the master-config.yml is properly configured, i.e. the value is a list - - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3" - - - debug: - msg: "Storage backend is set to etcd3" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml deleted file mode 100644 index 2a8de50a2..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: OpenShift Health Checks - hosts: oo_all_hosts - any_errors_fatal: true - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: upgrade - post_tasks: - - name: Run health checks (upgrade) - action: openshift_health_check - args: - checks: - - disk_availability - - memory_availability - - docker_image_availability diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml deleted file mode 100644 index 3c0017891..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- name: Verify upgrade can proceed on first master - hosts: oo_first_master - gather_facts: no - tasks: - - fail: - msg: > - This upgrade is only supported for origin and openshift-enterprise - deployment types - when: deployment_type not in ['origin','openshift-enterprise'] - - # Error out in situations where the user has older versions specified in their - # inventory in any of the openshift_release, openshift_image_tag, and - # openshift_pkg_version variables. These must be removed or updated to proceed - # with upgrade. - # TODO: Should we block if you're *over* the next major release version as well? 
- - fail: - msg: > - openshift_pkg_version is {{ openshift_pkg_version }} which is not a - valid version for a {{ openshift_upgrade_target }} upgrade - when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<') - - - fail: - msg: > - openshift_image_tag is {{ openshift_image_tag }} which is not a - valid version for a {{ openshift_upgrade_target }} upgrade - when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<') - - - set_fact: - openshift_release: "{{ openshift_release[1:] }}" - when: openshift_release is defined and openshift_release[0] == 'v' - - - fail: - msg: > - openshift_release is {{ openshift_release }} which is not a - valid release for a {{ openshift_upgrade_target }} upgrade - when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=') diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 446f315d6..96f970506 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -6,7 +6,7 @@ - name: Update oreg_auth docker login credentials if necessary include_role: - name: docker + name: container_runtime tasks_from: registry_auth.yml when: oreg_auth_user is defined @@ -21,7 +21,7 @@ block: - name: Check latest available OpenShift RPM version repoquery: - name: "{{ openshift.common.service_type }}" + name: "{{ openshift_service_type }}" ignore_excluders: true register: repoquery_out diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index d08c6e940..7b82fe05b 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -2,22 +2,8 @@ ############################################################################### # Upgrade Masters ############################################################################### - -# If facts cache were for some reason deleted, this fact may not be set, and if not set -# it will always default to true. This causes problems for the etcd data dir fact detection -# so we must first make sure this is set correctly before attempting the backup. -- name: Set master embedded_etcd fact - hosts: oo_masters_to_config - roles: - - openshift_facts - tasks: - - openshift_facts: - role: master - local_facts: - embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - -- name: Upgrade and backup etcd - include: ./etcd/main.yml +- name: Backup and upgrade etcd + import_playbook: ../../../openshift-etcd/private/upgrade_main.yml # Create service signer cert when missing. Service signer certificate # is added to master config in the master_config_upgrade hook. 
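# The master_config_upgrade hook referenced here is a small modify_yaml task file (see
# the v3_9/master_config_upgrade.yml added later in this patch); after it runs, the
# relevant part of master-config.yaml looks roughly like this (illustrative excerpt):
#
#   controllerConfig:
#     serviceServingCert:
#       signer:
#         certFile: service-signer.crt
#         keyFile: service-signer.key
#   servingInfo:
#     clientCA: ca.crt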
@@ -30,7 +16,7 @@ register: service_signer_cert_stat changed_when: false -- include: create_service_signer_cert.yml +- import_playbook: create_service_signer_cert.yml # oc adm migrate storage should be run prior to etcd v3 upgrade # See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 @@ -71,7 +57,7 @@ - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}" when: openshift_master_upgrade_pre_hook is defined - - include: "{{ openshift_master_upgrade_pre_hook }}" + - include_tasks: "{{ openshift_master_upgrade_pre_hook }}" when: openshift_master_upgrade_pre_hook is defined - include_role: @@ -82,20 +68,20 @@ - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" when: openshift_master_upgrade_hook is defined - - include: "{{ openshift_master_upgrade_hook }}" + - include_tasks: "{{ openshift_master_upgrade_hook }}" when: openshift_master_upgrade_hook is defined - - include: ../../../openshift-master/private/tasks/restart_hosts.yml + - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml when: openshift.common.rolling_restart_mode == 'system' - - include: ../../../openshift-master/private/tasks/restart_services.yml + - include_tasks: ../../../openshift-master/private/tasks/restart_services.yml when: openshift.common.rolling_restart_mode == 'services' # Run the post-upgrade hook if defined: - debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}" when: openshift_master_upgrade_post_hook is defined - - include: "{{ openshift_master_upgrade_post_hook }}" + - include_tasks: "{{ openshift_master_upgrade_post_hook }}" when: openshift_master_upgrade_post_hook is defined - name: Post master upgrade - Upgrade clusterpolicies storage @@ -143,10 +129,6 @@ roles: - { role: openshift_cli } vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
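# Sketch of how the openshift_master_upgrade_{pre_hook,hook,post_hook} variables handled
# earlier in this hunk are meant to be used: point each at a tasks file of your own (the
# path below is hypothetical) and it is included on each master at the corresponding
# point of the rolling restart.
#
#   In the inventory:
#     openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
#
#   /usr/share/custom/pre_master.yml:
#     ---
#     - name: Note which master is about to be upgraded
#       debug:
#         msg: "Upgrading {{ inventory_hostname }}"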
- skip_docker_role: True __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml" tasks: - name: Reconcile Cluster Roles @@ -279,7 +261,7 @@ roles: - openshift_facts tasks: - - include: docker/tasks/upgrade.yml + - include_tasks: docker/tasks/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool - name: Drain and upgrade master nodes @@ -291,7 +273,7 @@ pre_tasks: - name: Load lib_openshift modules - include_role: + import_role: name: lib_openshift # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node @@ -309,7 +291,7 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_control_plane_drain_result until: not l_upgrade_control_plane_drain_result | failed diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 5dc8193a7..a3cb1d0f9 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -1,16 +1,25 @@ --- +- name: Prepull images and rpms before doing rolling restart + hosts: oo_nodes_to_upgrade:!oo_masters_to_config + roles: + - role: openshift_facts + tasks: + - include_role: + name: openshift_node + tasks_from: upgrade_pre.yml + vars: + openshift_node_upgrade_in_progress: True + - name: Drain and upgrade nodes hosts: oo_nodes_to_upgrade:!oo_masters_to_config # This var must be set with -e on invocation, as it is not a per-host inventory var # and is evaluated early. Values such as "20%" can also be used. serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}" - + roles: + - lib_openshift + - openshift_facts pre_tasks: - - name: Load lib_openshift modules - include_role: - name: lib_openshift - # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node # or docker actually needs an upgrade before proceeding. Perhaps best to save this until # we merge upgrade functionality into the base roles and a normal config.yml playbook run. 
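# The "Drain Node for Kubelet upgrade" task in the next hunk (and the control-plane
# variant above) expands to roughly the following command, run from the first master
# (hostname and paths illustrative):
#
#   oc adm drain node1.example.com \
#     --config=/etc/origin/master/admin.kubeconfig \
#     --force --delete-local-data --ignore-daemonsets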
@@ -26,26 +35,19 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_nodes_drain_result until: not l_upgrade_nodes_drain_result | failed retries: 60 delay: 60 - roles: - - openshift_facts post_tasks: - include_role: name: openshift_node tasks_from: upgrade.yml vars: openshift_node_upgrade_in_progress: True - - include_role: - name: openshift_excluder - vars: - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Set node schedulability oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" @@ -56,3 +58,11 @@ register: node_schedulable until: node_schedulable|succeeded when: node_unschedulable|changed + +- name: Re-enable excluders + hosts: oo_nodes_to_upgrade:!oo_masters_to_config + tasks: + - include_role: + name: openshift_excluder + vars: + r_openshift_excluder_action: enable diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml new file mode 100644 index 000000000..4fc897a57 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml @@ -0,0 +1,66 @@ +--- +- name: create new scale group + hosts: localhost + tasks: + - name: build upgrade scale groups + include_role: + name: openshift_aws + tasks_from: upgrade_node_group.yml + + - fail: + msg: "Ensure that new scale groups were provisioned before proceeding to update." + when: + - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0" + - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0" + - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes + +- name: initialize upgrade bits + import_playbook: init.yml + +- name: unschedule nodes + hosts: oo_sg_current_nodes + tasks: + - name: Load lib_openshift modules + import_role: + name: ../roles/lib_openshift + + - name: Mark node unschedulable + oc_adm_manage_node: + node: "{{ openshift.node.nodename | lower }}" + schedulable: False + delegate_to: "{{ groups.oo_first_master.0 }}" + retries: 10 + delay: 5 + register: node_unschedulable + until: node_unschedulable|succeeded + +- name: Drain nodes + hosts: oo_sg_current_nodes + # This var must be set with -e on invocation, as it is not a per-host inventory var + # and is evaluated early. Values such as "20%" can also be used. 
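# For example (entry-point path illustrative; adjust to your checkout):
#
#   ansible-playbook -i hosts .../upgrades/upgrade_scale_group.yml \
#     -e openshift_upgrade_nodes_serial="20%" \
#     -e openshift_upgrade_nodes_max_fail_percentage=10 \
#     -e openshift_upgrade_nodes_drain_timeout=600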
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" + max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}" + tasks: + - name: Drain Node for Kubelet upgrade + command: > + {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + --force --delete-local-data --ignore-daemonsets + --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s + delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_nodes_drain_result + until: not l_upgrade_nodes_drain_result | failed + retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}" + delay: 5 + failed_when: + - l_upgrade_nodes_drain_result | failed + - openshift_upgrade_nodes_drain_timeout | default(0) == '0' + +# Alright, let's clean up! +- name: clean up the old scale group + hosts: localhost + tasks: + - name: clean up scale group + include_role: + name: openshift_aws + tasks_from: remove_scale_group.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 6cb6a665f..a5ad3801d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -2,7 +2,7 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../init.yml +- import_playbook: ../init.yml tags: - pre_upgrade @@ -15,110 +15,29 @@ openshift_upgrade_target: '3.6' openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../../../init/version.yml - tags: - - pre_upgrade +- import_playbook: ../pre/config.yml vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. 
At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True + l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config" -- include: ../../../../openshift-master/private/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade +- import_playbook: validator.yml -- name: Verify docker upgrade targets +- name: Flag pre-upgrade checks complete for hosts without errors hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: validator.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + - set_fact: + pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml +# Pre-upgrade completed -- include: ../upgrade_control_plane.yml +- import_playbook: ../upgrade_control_plane.yml vars: master_config_hook: "v3_6/master_config_upgrade.yml" -- include: ../upgrade_nodes.yml +- import_playbook: ../upgrade_nodes.yml -- include: ../post_control_plane.yml +- import_playbook: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 8f48bedcc..1498db4c5 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -11,116 +11,38 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
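# A typical two-phase run of this release therefore looks like (entry-point paths
# illustrative; they depend on how these playbooks are wrapped in your checkout):
#
#   ansible-playbook -i hosts .../v3_6/upgrade_control_plane.yml
#   ansible-playbook -i hosts .../v3_6/upgrade_nodes.yml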
# -- include: ../init.yml - tags: - - pre_upgrade +- import_playbook: ../init.yml + vars: + l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" - name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tasks: - set_fact: openshift_upgrade_target: '3.6' openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../../../init/version.yml - tags: - - pre_upgrade +- import_playbook: ../pre/config.yml vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True + l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_masters_to_config" -- include: ../../../../openshift-master/private/validate_restart.yml - tags: - - pre_upgrade +- import_playbook: validator.yml -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets +- name: Flag pre-upgrade checks complete for hosts without errors hosts: oo_masters_to_config:oo_etcd_to_config tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: validator.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
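# The tag-based pre_upgrade gating removed here is replaced by a pre_upgrade_complete
# host fact, set just below on the hosts that passed the checks.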
+ - set_fact: + pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml +# Pre-upgrade completed -- include: ../upgrade_control_plane.yml +- import_playbook: ../upgrade_control_plane.yml vars: master_config_hook: "v3_6/master_config_upgrade.yml" -- include: ../post_control_plane.yml +- import_playbook: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index f25cfe0d0..6958652d8 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -4,7 +4,7 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # -- include: ../init.yml +- import_playbook: ../init.yml tags: - pre_upgrade @@ -17,99 +17,22 @@ openshift_upgrade_target: '3.6' openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../../../init/version.yml - tags: - - pre_upgrade +- import_playbook: ../pre/config.yml vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade + l_upgrade_repo_hosts: "oo_nodes_to_config" + l_upgrade_no_proxy_hosts: "oo_all_hosts" + l_upgrade_health_check_hosts: "oo_nodes_to_config" + l_upgrade_verify_targets_hosts: "oo_nodes_to_config" + l_upgrade_docker_target_hosts: "oo_nodes_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" + l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + - set_fact: + pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml +# Pre-upgrade completed -- include: ../upgrade_nodes.yml +- import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index 2b99568c7..4daa9e490 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -2,7 +2,7 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../init.yml +- import_playbook: ../init.yml tags: - pre_upgrade @@ -15,111 +15,26 @@ openshift_upgrade_target: '3.7' openshift_upgrade_min: '3.6' -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- include: ../pre/verify_etcd3_backend.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - 
tags: - - pre_upgrade - -- include: ../../../../init/version.yml - tags: - - pre_upgrade +- import_playbook: ../pre/config.yml vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True + l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config" -- include: ../../../../openshift-master/private/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade +- import_playbook: validator.yml -- name: Verify docker upgrade targets +- name: Flag pre-upgrade checks complete for hosts without errors hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: validator.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + - set_fact: + pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml +# Pre-upgrade completed -- include: ../upgrade_control_plane.yml +- import_playbook: ../upgrade_control_plane.yml vars: master_config_hook: "v3_7/master_config_upgrade.yml" @@ -127,16 +42,18 @@ - name: Cycle all controller services to force new leader election mode hosts: oo_masters_to_config gather_facts: no + roles: + - role: openshift_facts tasks: - - name: Stop {{ openshift.common.service_type }}-master-controllers + - name: Stop {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: stopped - - name: Start {{ openshift.common.service_type }}-master-controllers + - name: Start {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: started -- include: ../upgrade_nodes.yml +- import_playbook: ../upgrade_nodes.yml -- include: ../post_control_plane.yml +- import_playbook: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index d3d2046e6..1750148d4 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -11,119 +11,37 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # -- include: ../init.yml - tags: - - pre_upgrade +- import_playbook: ../init.yml + vars: + l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" - name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tasks: - set_fact: openshift_upgrade_target: '3.7' openshift_upgrade_min: '3.6' -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- include: ../pre/verify_etcd3_backend.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../../../init/version.yml - tags: - - pre_upgrade +- import_playbook: ../pre/config.yml vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more 
specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../openshift-master/private/validate_restart.yml - tags: - - pre_upgrade + l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_masters_to_config" -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade +- import_playbook: validator.yml -- name: Verify docker upgrade targets +- name: Flag pre-upgrade checks complete for hosts without errors hosts: oo_masters_to_config:oo_etcd_to_config tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: validator.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + - set_fact: + pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml +# Pre-upgrade completed -- include: ../upgrade_control_plane.yml +- import_playbook: ../upgrade_control_plane.yml vars: master_config_hook: "v3_7/master_config_upgrade.yml" @@ -131,14 +49,16 @@ - name: Cycle all controller services to force new leader election mode hosts: oo_masters_to_config gather_facts: no + roles: + - role: openshift_facts tasks: - - name: Stop {{ openshift.common.service_type }}-master-controllers + - name: Stop {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: stopped - - name: Start {{ openshift.common.service_type }}-master-controllers + - name: Start {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: started -- include: ../post_control_plane.yml +- import_playbook: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index c0546bd2d..16d95514c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -4,7 +4,7 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. 
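# As with the other entry points in this patch, the pre-upgrade work is pulled in from
# ../pre/config.yml and driven by the l_upgrade_* variables, each of which is simply a
# host pattern for one of the pre-upgrade plays. Conceptually (minimal sketch, not the
# actual contents of pre/config.yml):
#
#   - name: Update repos
#     hosts: "{{ l_upgrade_repo_hosts }}"
#     roles:
#     - openshift_repos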
# -- include: ../init.yml +- import_playbook: ../init.yml tags: - pre_upgrade @@ -17,99 +17,22 @@ openshift_upgrade_target: '3.7' openshift_upgrade_min: '3.6' -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../../../init/version.yml - tags: - - pre_upgrade +- import_playbook: ../pre/config.yml vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade + l_upgrade_repo_hosts: "oo_nodes_to_config" + l_upgrade_no_proxy_hosts: "oo_all_hosts" + l_upgrade_health_check_hosts: "oo_nodes_to_config" + l_upgrade_verify_targets_hosts: "oo_nodes_to_config" + l_upgrade_docker_target_hosts: "oo_nodes_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" + l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + - set_fact: + pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml +# Pre-upgrade completed -- include: ../upgrade_nodes.yml +- import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index b602cdd0e..0f74e0137 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -2,7 +2,7 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../init.yml +- import_playbook: ../init.yml tags: - pre_upgrade @@ -15,111 +15,26 @@ openshift_upgrade_target: '3.8' openshift_upgrade_min: '3.7' -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- include: ../pre/verify_etcd3_backend.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../../../init/version.yml - tags: - - pre_upgrade +- import_playbook: ../pre/config.yml vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True + l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config" -- include: ../../../../openshift-master/private/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade +- import_playbook: validator.yml -- name: Verify docker upgrade targets +- name: Flag pre-upgrade checks complete for hosts without errors hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: validator.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + - set_fact: + pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml +# Pre-upgrade completed -- include: ../upgrade_control_plane.yml +- import_playbook: ../upgrade_control_plane.yml vars: master_config_hook: "v3_7/master_config_upgrade.yml" @@ -127,16 +42,18 @@ - name: Cycle all controller services to force new leader election mode hosts: oo_masters_to_config gather_facts: no + roles: + - role: openshift_facts tasks: - - name: Stop {{ openshift.common.service_type }}-master-controllers + - name: Stop {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: stopped - - name: Start {{ openshift.common.service_type }}-master-controllers + - name: Start {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: started -- include: ../upgrade_nodes.yml +- import_playbook: ../upgrade_nodes.yml -- include: ../post_control_plane.yml +- import_playbook: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index da81e6dea..08bfd239f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -11,119 +11,37 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
# -- include: ../init.yml - tags: - - pre_upgrade +- import_playbook: ../init.yml + vars: + l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" - name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tasks: - set_fact: openshift_upgrade_target: '3.8' openshift_upgrade_min: '3.7' -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- include: ../pre/verify_etcd3_backend.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../../../init/version.yml - tags: - - pre_upgrade +- import_playbook: ../pre/config.yml vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../openshift-master/private/validate_restart.yml - tags: - - pre_upgrade + l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_masters_to_config" -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade +- import_playbook: validator.yml -- name: Verify docker upgrade targets +- name: Flag pre-upgrade checks complete for hosts without errors hosts: oo_masters_to_config:oo_etcd_to_config tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: validator.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
+ - set_fact: + pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml +# Pre-upgrade completed -- include: ../upgrade_control_plane.yml +- import_playbook: ../upgrade_control_plane.yml vars: master_config_hook: "v3_7/master_config_upgrade.yml" @@ -131,14 +49,16 @@ - name: Cycle all controller services to force new leader election mode hosts: oo_masters_to_config gather_facts: no + roles: + - role: openshift_facts tasks: - - name: Stop {{ openshift.common.service_type }}-master-controllers + - name: Stop {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: stopped - - name: Start {{ openshift.common.service_type }}-master-controllers + - name: Start {{ openshift_service_type }}-master-controllers systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: started -- include: ../post_control_plane.yml +- import_playbook: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index abd56e762..b5f1038fd 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -4,7 +4,7 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. 
# -- include: ../init.yml +- import_playbook: ../init.yml tags: - pre_upgrade @@ -17,99 +17,22 @@ openshift_upgrade_target: '3.8' openshift_upgrade_min: '3.7' -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../pre/verify_health_checks.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../../../init/version.yml - tags: - - pre_upgrade +- import_playbook: ../pre/config.yml vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade + l_upgrade_repo_hosts: "oo_nodes_to_config" + l_upgrade_no_proxy_hosts: "oo_all_hosts" + l_upgrade_health_check_hosts: "oo_nodes_to_config" + l_upgrade_verify_targets_hosts: "oo_nodes_to_config" + l_upgrade_docker_target_hosts: "oo_nodes_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" + l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + - set_fact: + pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml +# Pre-upgrade completed -- include: ../upgrade_nodes.yml +- import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins new file mode 120000 index 000000000..7de3c1dd7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml new file mode 100644 index 000000000..1d4d1919c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml @@ -0,0 +1,20 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.election.lockName' + yaml_value: 'openshift-master-controllers' + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key + +- modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/roles b/playbooks/common/openshift-cluster/upgrades/v3_9/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml new file mode 100644 index 000000000..0aea5069d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml @@ -0,0 +1,55 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- import_playbook: ../init.yml + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tasks: + - set_fact: + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.7' + +- import_playbook: ../pre/config.yml + vars: + l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config" + +- import_playbook: validator.yml + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - set_fact: + pre_upgrade_complete: True + +# Pre-upgrade completed + +- import_playbook: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_facts + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started + +- import_playbook: ../upgrade_nodes.yml + +- import_playbook: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml new file mode 100644 index 000000000..05aa737c6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -0,0 +1,65 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- import_playbook: ../init.yml + vars: + l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tasks: + - set_fact: + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.7' + +- import_playbook: ../pre/config.yml + vars: + l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_masters_to_config" + +- import_playbook: validator.yml + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - set_fact: + pre_upgrade_complete: True + +# Pre-upgrade completed + + +- import_playbook: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_facts + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started + +- import_playbook: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml new file mode 100644 index 000000000..1d1b255c1 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml @@ -0,0 +1,34 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- import_playbook: ../init.yml + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tasks: + - set_fact: + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.7' + +- import_playbook: ../pre/config.yml + vars: + l_upgrade_repo_hosts: "oo_nodes_to_config" + l_upgrade_no_proxy_hosts: "oo_all_hosts" + l_upgrade_health_check_hosts: "oo_nodes_to_config" + l_upgrade_verify_targets_hosts: "oo_nodes_to_config" + l_upgrade_docker_target_hosts: "oo_nodes_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" + l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - set_fact: + pre_upgrade_complete: True + +# Pre-upgrade completed + +- import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml new file mode 100644 index 000000000..4bd2d87b1 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.9 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - debug: msg="noop" diff --git a/playbooks/container-runtime/config.yml b/playbooks/container-runtime/config.yml new file mode 100644 index 000000000..f15aa771f --- /dev/null +++ b/playbooks/container-runtime/config.yml @@ -0,0 +1,6 @@ +--- +- import_playbook: ../init/main.yml + vars: + skip_verison: True + +- import_playbook: private/config.yml diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml new file mode 100644 index 000000000..67445edeb --- /dev/null +++ b/playbooks/container-runtime/private/config.yml @@ -0,0 +1,28 @@ +--- +- hosts: "{{ l_containerized_host_groups }}" + vars: + l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}" + l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}" + # role: container_runtime is necessary here to bring role default variables + # into the play scope. + roles: + - role: container_runtime + tasks: + - include_role: + name: container_runtime + tasks_from: package_docker.yml + when: + - not openshift_docker_use_system_container | bool + - not openshift_use_crio_only | bool + - include_role: + name: container_runtime + tasks_from: systemcontainer_docker.yml + when: + - openshift_docker_use_system_container | bool + - not openshift_use_crio_only | bool + - include_role: + name: container_runtime + tasks_from: systemcontainer_crio.yml + when: + - openshift_use_crio | bool + - openshift_docker_is_node_or_master | bool diff --git a/playbooks/container-runtime/private/roles b/playbooks/container-runtime/private/roles new file mode 120000 index 000000000..148b13206 --- /dev/null +++ b/playbooks/container-runtime/private/roles @@ -0,0 +1 @@ +../../roles/
\ No newline at end of file diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml new file mode 100644 index 000000000..0e6bde09a --- /dev/null +++ b/playbooks/deploy_cluster.yml @@ -0,0 +1,46 @@ +--- +- import_playbook: init/main.yml + +- import_playbook: openshift-checks/private/install.yml + +- import_playbook: openshift-etcd/private/config.yml + +- import_playbook: openshift-nfs/private/config.yml + when: groups.oo_nfs_to_config | default([]) | count > 0 + +- import_playbook: openshift-loadbalancer/private/config.yml + when: groups.oo_lb_to_config | default([]) | count > 0 + +- import_playbook: openshift-master/private/config.yml + +- import_playbook: openshift-master/private/additional_config.yml + +- import_playbook: openshift-node/private/config.yml + +- import_playbook: openshift-glusterfs/private/config.yml + when: groups.oo_glusterfs_to_config | default([]) | count > 0 + +- import_playbook: openshift-hosted/private/config.yml + +- import_playbook: openshift-metrics/private/config.yml + when: openshift_metrics_install_metrics | default(false) | bool + +- import_playbook: openshift-logging/private/config.yml + when: openshift_logging_install_logging | default(false) | bool + +- import_playbook: openshift-prometheus/private/config.yml + when: openshift_hosted_prometheus_deploy | default(false) | bool + +- import_playbook: openshift-service-catalog/private/config.yml + when: openshift_enable_service_catalog | default(true) | bool + +- import_playbook: openshift-management/private/config.yml + when: openshift_management_install_management | default(false) | bool + +- name: Print deprecated variable warning message if necessary + hosts: oo_first_master + gather_facts: no + tasks: + - debug: msg="{{__deprecation_message}}" + when: + - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/provision.yml index 097717607..6016e6a78 100644 --- a/playbooks/gcp/openshift-cluster/provision.yml +++ b/playbooks/gcp/provision.yml @@ -9,8 +9,5 @@ include_role: name: openshift_gcp -- name: run the init - include: ../../init/main.yml - -- name: run the config - include: ../../common/openshift-cluster/config.yml +- name: run the cluster deploy + import_playbook: ../deploy_cluster.yml diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml index 8787c87e1..8087f6ffc 100644 --- a/playbooks/init/evaluate_groups.yml +++ b/playbooks/init/evaluate_groups.yml @@ -46,14 +46,9 @@ - name: Evaluate groups - Fail if no etcd hosts group is defined fail: msg: > - Running etcd as an embedded service is no longer supported. If this is a - new install please define an 'etcd' group with either one or three - hosts. These hosts may be the same hosts as your masters. If this is an - upgrade you may set openshift_master_unsupported_embedded_etcd=true - until a migration playbook becomes available. + Running etcd as an embedded service is no longer supported. 
when: - g_etcd_hosts | default([]) | length not in [3,1] - - not openshift_master_unsupported_embedded_etcd | default(False) - not (openshift_node_bootstrap | default(False)) - name: Evaluate oo_all_hosts diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml index 05142f9b6..d41f365dc 100644 --- a/playbooks/init/facts.yml +++ b/playbooks/init/facts.yml @@ -84,6 +84,8 @@ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" - yum-utils + register: result + until: result | success - name: Ensure various deps for running system containers are installed package: @@ -100,6 +102,8 @@ or (openshift_use_openvswitch_system_container | default(False)) | bool or (openshift_use_node_system_container | default(False)) | bool or (openshift_use_master_system_container | default(False)) | bool + register: result + until: result | success - name: Gather Cluster facts and set is_containerized if needed openshift_facts: @@ -131,11 +135,13 @@ - openshift_http_proxy is defined or openshift_https_proxy is defined - openshift_generate_no_proxy_hosts | default(True) | bool + - name: Initialize openshift.node.sdn_mtu + openshift_facts: + role: node + local_facts: + sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" + - name: initialize_facts set_fact repoquery command set_fact: repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}" - - - name: initialize_facts set_fact on openshift_docker_hosted_registry_network - set_fact: - openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml index 1d4f41ffc..b2b972a7d 100644 --- a/playbooks/init/main.yml +++ b/playbooks/init/main.yml @@ -18,12 +18,15 @@ - import_playbook: facts.yml - import_playbook: sanity_checks.yml + when: not (skip_sanity_checks | default(False)) - import_playbook: validate_hostnames.yml + when: not (skip_validate_hostnames | default(False)) - import_playbook: repos.yml - import_playbook: version.yml + when: not (skip_verison | default(False)) - name: Initialization Checkpoint End hosts: all diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml index a7114fc80..4967b8f13 100644 --- a/playbooks/init/repos.yml +++ b/playbooks/init/repos.yml @@ -3,6 +3,14 @@ hosts: oo_all_hosts gather_facts: no tasks: + - name: subscribe instances to Red Hat Subscription Manager + include_role: + name: rhel_subscribe + when: + - ansible_distribution == 'RedHat' + - deployment_type == 'openshift-enterprise' + - (rhel_subscription_user or rhsub_user) is defined + - (rhel_subscription_password or rhsub_pass) is defined - name: initialize openshift repos include_role: name: openshift_repos diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml index c9f186e72..f3bb3c2d1 100644 --- a/playbooks/openshift-etcd/private/ca.yml +++ b/playbooks/openshift-etcd/private/ca.yml @@ -7,7 +7,7 @@ tasks: - include_role: name: etcd - tasks_from: ca + tasks_from: ca.yml vars: etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" diff --git 
a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml index d738c8207..ce21a1f96 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml +++ b/playbooks/openshift-etcd/private/certificates-backup.yml @@ -5,10 +5,10 @@ tasks: - include_role: name: etcd - tasks_from: backup_generated_certificates + tasks_from: backup_generated_certificates.yml - include_role: name: etcd - tasks_from: remove_generated_certificates + tasks_from: remove_generated_certificates.yml - name: Backup deployed etcd certificates hosts: oo_etcd_to_config @@ -16,4 +16,4 @@ tasks: - include_role: name: etcd - tasks_from: backup_server_certificates + tasks_from: backup_server_certificates.yml diff --git a/playbooks/openshift-etcd/private/config.yml b/playbooks/openshift-etcd/private/config.yml index 3d6c79834..35407969e 100644 --- a/playbooks/openshift-etcd/private/config.yml +++ b/playbooks/openshift-etcd/private/config.yml @@ -19,7 +19,6 @@ hosts: oo_etcd_to_config any_errors_fatal: true roles: - - role: os_firewall - role: openshift_clock - role: openshift_etcd etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml index 514319b88..be177b714 100644 --- a/playbooks/openshift-etcd/private/embedded2external.yml +++ b/playbooks/openshift-etcd/private/embedded2external.yml @@ -20,9 +20,9 @@ - name: Check the master API is ready include_role: name: openshift_master - tasks_from: check_master_api_is_ready + tasks_from: check_master_api_is_ready.yml - set_fact: - master_service: "{{ openshift.common.service_type + '-master' }}" + master_service: "{{ openshift_service_type + '-master' }}" embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - debug: msg: "master service name: {{ master_service }}" @@ -34,7 +34,7 @@ # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285 - include_role: name: etcd - tasks_from: backup + tasks_from: backup.yml vars: r_etcd_common_backup_tag: pre-migrate r_etcd_common_embedded_etcd: "{{ true }}" @@ -42,7 +42,7 @@ - include_role: name: etcd - tasks_from: backup.archive + tasks_from: backup.archive.yml vars: r_etcd_common_backup_tag: pre-migrate r_etcd_common_embedded_etcd: "{{ true }}" @@ -58,7 +58,7 @@ tasks: - include_role: name: etcd - tasks_from: backup_master_etcd_certificates + tasks_from: backup_master_etcd_certificates.yml - name: Redeploy master etcd certificates import_playbook: master_etcd_certificates.yml @@ -75,10 +75,10 @@ pre_tasks: - include_role: name: etcd - tasks_from: disable_etcd + tasks_from: disable_etcd.yml - include_role: name: etcd - tasks_from: clean_data + tasks_from: clean_data.yml # 6. 
copy the embedded etcd backup to the external host # TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory @@ -93,7 +93,7 @@ - include_role: name: etcd - tasks_from: backup.fetch + tasks_from: backup.fetch.yml vars: etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" r_etcd_common_backup_tag: pre-migrate @@ -103,7 +103,7 @@ - include_role: name: etcd - tasks_from: backup.copy + tasks_from: backup.copy.yml vars: etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" r_etcd_common_backup_tag: pre-migrate @@ -124,14 +124,14 @@ tasks: - include_role: name: etcd - tasks_from: backup.unarchive + tasks_from: backup.unarchive.yml vars: r_etcd_common_backup_tag: pre-migrate r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" - include_role: name: etcd - tasks_from: backup.force_new_cluster + tasks_from: backup.force_new_cluster.yml vars: r_etcd_common_backup_tag: pre-migrate r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" @@ -145,7 +145,7 @@ tasks: - include_role: name: openshift_master - tasks_from: configure_external_etcd + tasks_from: configure_external_etcd.yml vars: etcd_peer_url_scheme: "https" etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}" diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml index 4269918c2..313ed8bec 100644 --- a/playbooks/openshift-etcd/private/migrate.yml +++ b/playbooks/openshift-etcd/private/migrate.yml @@ -17,9 +17,8 @@ tasks: - include_role: name: etcd - tasks_from: migrate.pre_check + tasks_from: migrate.pre_check.yml vars: - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" etcd_peer: "{{ ansible_default_ipv4.address }}" # TODO: This will be different for release-3.6 branch @@ -28,8 +27,8 @@ tasks: - set_fact: master_services: - - "{{ openshift.common.service_type + '-master-controllers' }}" - - "{{ openshift.common.service_type + '-master-api' }}" + - "{{ openshift_service_type + '-master-controllers' }}" + - "{{ openshift_service_type + '-master-api' }}" - debug: msg: "master service name: {{ master_services }}" - name: Stop masters @@ -46,10 +45,9 @@ post_tasks: - include_role: name: etcd - tasks_from: backup + tasks_from: backup.yml vars: r_etcd_common_backup_tag: pre-migration - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - name: Gate on etcd backup @@ -74,7 +72,7 @@ pre_tasks: - include_role: name: etcd - tasks_from: disable_etcd + tasks_from: disable_etcd.yml - name: Migrate data on first etcd hosts: oo_etcd_to_migrate[0] @@ -82,9 +80,8 @@ tasks: - include_role: name: etcd - tasks_from: migrate + tasks_from: migrate.yml vars: - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" etcd_peer: "{{ openshift.common.ip }}" etcd_url_scheme: "https" etcd_peer_url_scheme: "https" @@ -95,9 +92,8 @@ tasks: - include_role: name: etcd - tasks_from: clean_data + tasks_from: clean_data.yml vars: - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" etcd_peer: "{{ openshift.common.ip }}" etcd_url_scheme: "https" etcd_peer_url_scheme: "https" @@ -132,7 +128,7 @@ tasks: - include_role: name: etcd - tasks_from: migrate.add_ttls + tasks_from: migrate.add_ttls.yml vars: etcd_peer: "{{ 
hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}" etcd_url_scheme: "https" @@ -144,7 +140,7 @@ tasks: - include_role: name: etcd - tasks_from: migrate.configure_master + tasks_from: migrate.configure_master.yml when: etcd_migration_failed | length == 0 - debug: msg: "Skipping master re-configuration since migration failed." diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml index 438f704bc..158bcb849 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml +++ b/playbooks/openshift-etcd/private/redeploy-ca.yml @@ -16,12 +16,12 @@ tasks: - include_role: name: etcd - tasks_from: backup_ca_certificates + tasks_from: backup_ca_certificates.yml - include_role: name: etcd - tasks_from: remove_ca_certificates + tasks_from: remove_ca_certificates.yml -- include: ../../../openshift-etcd/private/ca.yml +- import_playbook: ca.yml - name: Create temp directory for syncing certs hosts: localhost @@ -44,7 +44,7 @@ etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" -- include: ../../../openshift-etcd/private/restart.yml +- import_playbook: restart.yml # Do not restart etcd when etcd certificates were previously expired. when: ('expired' not in (hostvars | oo_select_keys(groups['etcd']) @@ -56,7 +56,7 @@ tasks: - include_role: name: etcd - tasks_from: retrieve_ca_certificates + tasks_from: retrieve_ca_certificates.yml vars: etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}" @@ -82,7 +82,7 @@ state: absent changed_when: false -- include: ../../../openshift-master/private/restart.yml +- import_playbook: ../../openshift-master/private/restart.yml # Do not restart masters when master or etcd certificates were previously expired. when: # masters diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/openshift-etcd/private/redeploy-certificates.yml index 4a9fbf7eb..1c8eb27ac 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml +++ b/playbooks/openshift-etcd/private/redeploy-certificates.yml @@ -1,6 +1,6 @@ --- - name: Check cert expirys - hosts: "{{ g_check_expiry_hosts }}" + hosts: oo_etcd_to_config vars: openshift_certificate_expiry_show_all: yes roles: @@ -10,3 +10,9 @@ # this playbook. Service restarts will be skipped if any # certificates were previously expired. 
- role: openshift_certificate_expiry + +- import_playbook: certificates-backup.yml + +- import_playbook: certificates.yml + vars: + etcd_certificates_redeploy: true diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml index fac8e3f02..3ef043ec8 100644 --- a/playbooks/openshift-etcd/private/scaleup.yml +++ b/playbooks/openshift-etcd/private/scaleup.yml @@ -32,7 +32,7 @@ until: etcd_add_check.rc == 0 - include_role: name: etcd - tasks_from: server_certificates + tasks_from: server_certificates.yml vars: etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}" etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}" @@ -78,4 +78,4 @@ post_tasks: - include_role: name: openshift_master - tasks_from: update_etcd_client_urls + tasks_from: update_etcd_client_urls.yml diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml index 14c74baf3..695b53990 100644 --- a/playbooks/openshift-etcd/private/server_certificates.yml +++ b/playbooks/openshift-etcd/private/server_certificates.yml @@ -7,7 +7,7 @@ post_tasks: - include_role: name: etcd - tasks_from: server_certificates + tasks_from: server_certificates.yml vars: etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml index 531175c85..7dfea07f1 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/openshift-etcd/private/upgrade_backup.yml @@ -6,10 +6,9 @@ post_tasks: - include_role: name: etcd - tasks_from: backup + tasks_from: backup.yml vars: r_etcd_common_backup_tag: "{{ etcd_backup_tag }}" - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - name: Gate on etcd backup diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml index 6fca42bd0..c133c0201 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml +++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml @@ -8,7 +8,7 @@ tasks: - include_role: name: etcd - tasks_from: upgrade_image + tasks_from: upgrade_image.yml vars: r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" etcd_peer: "{{ openshift.common.hostname }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml index 5b8ba3bb2..e373a4a4c 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/openshift-etcd/private/upgrade_main.yml @@ -6,7 +6,7 @@ # available in the repos. So for Fedora we'll simply skip this, sorry. 
- name: Backup etcd before upgrading anything - include: backup.yml + import_playbook: upgrade_backup.yml vars: etcd_backup_tag: "pre-upgrade-" when: openshift_etcd_backup | default(true) | bool @@ -16,14 +16,14 @@ tasks: - include_role: name: etcd - tasks_from: drop_etcdctl + tasks_from: drop_etcdctl.yml - name: Perform etcd upgrade - include: ./upgrade.yml + import_playbook: upgrade_step.yml when: openshift_etcd_upgrade | default(true) | bool - name: Backup etcd - include: backup.yml + import_playbook: upgrade_backup.yml vars: etcd_backup_tag: "post-3.0-" when: openshift_etcd_backup | default(true) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml index 51e8786b3..902c39d9c 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml +++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml @@ -8,7 +8,7 @@ tasks: - include_role: name: etcd - tasks_from: upgrade_rpm + tasks_from: upgrade_rpm.yml vars: r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" etcd_peer: "{{ openshift.common.hostname }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/openshift-etcd/private/upgrade_step.yml index c5ff4133c..60127fc68 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/openshift-etcd/private/upgrade_step.yml @@ -6,47 +6,47 @@ name: etcd tasks_from: version_detect.yml -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '2.1' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '2.2' -- include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '2.2.5' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '2.3' -- include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '2.3.7' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '3.0' -- include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '3.0.15' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '3.1' -- include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '3.1.3' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '3.2' -- include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '3.2.7' @@ -56,7 +56,7 @@ tasks: - include_role: name: etcd - tasks_from: upgrade_image + tasks_from: upgrade_image.yml vars: etcd_peer: "{{ openshift.common.hostname }}" when: diff --git a/playbooks/openshift-etcd/redeploy-ca.yml b/playbooks/openshift-etcd/redeploy-ca.yml new file mode 100644 index 000000000..769d694ba --- /dev/null +++ b/playbooks/openshift-etcd/redeploy-ca.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/redeploy-ca.yml diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml new file mode 100644 index 000000000..753878d70 --- /dev/null +++ b/playbooks/openshift-etcd/redeploy-certificates.yml @@ -0,0 +1,10 @@ +--- +- import_playbook: ../init/main.yml + +- 
import_playbook: private/redeploy-certificates.yml + +- import_playbook: private/restart.yml + vars: + g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" + +- import_playbook: ../openshift-master/private/restart.yml diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml new file mode 100644 index 000000000..ccc797527 --- /dev/null +++ b/playbooks/openshift-etcd/upgrade.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/evaluate_groups.yml + +- import_playbook: private/upgrade_main.yml diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md index f62aea229..107bbfff6 100644 --- a/playbooks/openshift-glusterfs/README.md +++ b/playbooks/openshift-glusterfs/README.md @@ -26,6 +26,9 @@ file. The hosts in this group are the nodes of the GlusterFS cluster. devices but you must specify the following variables in `[OSEv3:vars]`: * `openshift_storage_glusterfs_is_missing=False` * `openshift_storage_glusterfs_heketi_is_missing=False` + * If GlusterFS will be running natively, the target hosts must also be listed + in the `nodes` group. They must also already be configured as OpenShift + nodes before this playbook runs. By default, pods for a native GlusterFS cluster will be created in the `default` namespace. To change this, specify diff --git a/playbooks/openshift-glusterfs/private/registry.yml b/playbooks/openshift-glusterfs/private/registry.yml index 75c1f0300..917b729f9 100644 --- a/playbooks/openshift-glusterfs/private/registry.yml +++ b/playbooks/openshift-glusterfs/private/registry.yml @@ -1,40 +1,11 @@ --- - import_playbook: config.yml -- name: Initialize GlusterFS registry PV and PVC vars - hosts: oo_first_master - tags: hosted - tasks: - - set_fact: - glusterfs_pv: [] - glusterfs_pvc: [] - - - set_fact: - glusterfs_pv: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - storage: - glusterfs: - endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" - path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" - readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" - glusterfs_pvc: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - when: openshift.hosted.registry.storage.glusterfs.swap - - name: Create persistent volumes hosts: oo_first_master - tags: - - hosted - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + when: openshift_hosted_registry_storage_glusterfs_swap | default(False) - name: Create Hosted Resources hosts: oo_first_master diff --git a/playbooks/openshift-hosted/private/cockpit-ui.yml b/playbooks/openshift-hosted/private/cockpit-ui.yml index 359132dd0..d6529425b 100644 --- a/playbooks/openshift-hosted/private/cockpit-ui.yml +++ 
b/playbooks/openshift-hosted/private/cockpit-ui.yml @@ -5,4 +5,4 @@ - role: cockpit-ui when: - openshift_hosted_manage_registry | default(true) | bool - - not openshift.docker.hosted_registry_insecure | default(false) | bool + - not (openshift_docker_hosted_registry_insecure | default(false)) | bool diff --git a/playbooks/openshift-hosted/private/create_persistent_volumes.yml b/playbooks/openshift-hosted/private/create_persistent_volumes.yml index 8a60a30b8..41ae2eb69 100644 --- a/playbooks/openshift-hosted/private/create_persistent_volumes.yml +++ b/playbooks/openshift-hosted/private/create_persistent_volumes.yml @@ -1,9 +1,5 @@ --- - name: Create Hosted Resources - persistent volumes hosts: oo_first_master - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml index 7e9363c5f..7e9363c5f 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml index 2116c745c..2116c745c 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml new file mode 100644 index 000000000..518a1d624 --- /dev/null +++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/redeploy-registry-certificates.yml diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml new file mode 100644 index 000000000..a74dd8c79 --- /dev/null +++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/redeploy-router-certificates.yml diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml index d737b836b..2636d857e 100644 --- a/playbooks/openshift-loadbalancer/private/config.yml +++ b/playbooks/openshift-loadbalancer/private/config.yml @@ -11,15 +11,6 @@ status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" -- name: Configure firewall and docker for load balancers - hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config - vars: - openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" - roles: - - role: os_firewall - - role: openshift_docker - when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool - - name: Configure load balancers hosts: oo_lb_to_config vars: diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml new file mode 100644 index 000000000..83d330284 --- /dev/null +++ b/playbooks/openshift-logging/config.yml @@ -0,0 +1,9 @@ +--- +# +# This 
playbook is a preview of upcoming changes for installing +# Hosted logging on. See inventory/hosts.example for the +# currently supported method. +# +- import_playbook: ../init/main.yml + +- import_playbook: private/config.yml diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/openshift-logging/private/config.yml index bc59bd95a..bc59bd95a 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/openshift-logging/private/config.yml diff --git a/playbooks/openshift-logging/private/filter_plugins b/playbooks/openshift-logging/private/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/openshift-logging/private/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/openshift-logging/private/library b/playbooks/openshift-logging/private/library new file mode 120000 index 000000000..ba40d2f56 --- /dev/null +++ b/playbooks/openshift-logging/private/library @@ -0,0 +1 @@ +../../../library
\ No newline at end of file diff --git a/playbooks/openshift-logging/private/lookup_plugins b/playbooks/openshift-logging/private/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/openshift-logging/private/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/openshift-logging/private/roles b/playbooks/openshift-logging/private/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/openshift-logging/private/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml index b7cfbe4e4..a90cd6b22 100644 --- a/playbooks/openshift-master/private/additional_config.yml +++ b/playbooks/openshift-master/private/additional_config.yml @@ -19,8 +19,6 @@ openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" roles: - - role: openshift_master_cluster - when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - role: openshift_project_request_template when: openshift_project_request_template_manage - role: openshift_examples diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml index 4dbc041b0..4dbc041b0 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml +++ b/playbooks/openshift-master/private/certificates-backup.yml diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml index afb8d6bd1..15d301ddb 100644 --- a/playbooks/openshift-master/private/config.yml +++ b/playbooks/openshift-master/private/config.yml @@ -19,7 +19,6 @@ roles: - role: openshift_excluder r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Gather and set facts for master hosts hosts: oo_masters_to_config @@ -180,9 +179,7 @@ | oo_collect('openshift.common.ip') | default([]) | join(',') }}" roles: - - role: os_firewall - role: openshift_master_facts - - role: openshift_hosted_facts - role: openshift_clock - role: openshift_cloud_provider - role: openshift_builddefaults @@ -228,6 +225,8 @@ - name: Configure API Aggregation on masters hosts: oo_masters serial: 1 + roles: + - role: openshift_facts tasks: - include_tasks: tasks/wire_aggregator.yml @@ -237,7 +236,6 @@ roles: - role: openshift_excluder r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Master Install Checkpoint End hosts: all diff --git a/playbooks/openshift-master/private/redeploy-certificates.yml b/playbooks/openshift-master/private/redeploy-certificates.yml new file mode 100644 index 000000000..c0f75ae80 --- /dev/null +++ b/playbooks/openshift-master/private/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- import_playbook: certificates-backup.yml + +- import_playbook: certificates.yml + vars: + openshift_certificates_redeploy: true diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml index 5a837d80d..9f5502141 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml @@ -56,7 +56,7 @@ - groups.oo_etcd_to_config | default([]) | length == 0 - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt' # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate. 
- # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml + # This change will be reverted in playbooks/redeploy-certificates.yml - modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: servingInfo.clientCA @@ -207,7 +207,7 @@ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" with_items: "{{ client_users }}" -- include: ../../../openshift-master/private/restart.yml +- import_playbook: restart.yml # Do not restart masters when master or etcd certificates were previously expired. when: # masters @@ -272,7 +272,7 @@ state: absent changed_when: false -- include: ../../../openshift-node/private/restart.yml +- import_playbook: ../../openshift-node/private/restart.yml # Do not restart nodes when node, master or etcd certificates were previously expired. when: # nodes diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml index 8229eccfa..007b23ea3 100644 --- a/playbooks/openshift-master/private/scaleup.yml +++ b/playbooks/openshift-master/private/scaleup.yml @@ -20,11 +20,11 @@ - restart master controllers handlers: - name: restart master api - service: name={{ openshift.common.service_type }}-master-controllers state=restarted + service: name={{ openshift_service_type }}-master-controllers state=restarted notify: verify api server # We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + command: "systemctl restart {{ openshift_service_type }}-master-controllers" retries: 3 delay: 5 register: result diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml index 97acc5d5d..4f55d5c82 100644 --- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml +++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml @@ -180,21 +180,19 @@ #restart master serially here - name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted + systemd: name={{ openshift_service_type }}-master-api state=restarted when: - yedit_output.changed - - openshift.master.cluster_method == 'native' # We retry the controllers because the API may not be 100% initialized yet. 
- name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + command: "systemctl restart {{ openshift_service_type }}-master-controllers" retries: 3 delay: 5 register: result until: result.rc == 0 when: - yedit_output.changed - - openshift.master.cluster_method == 'native' - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml index 5dbb21502..1077d0b9c 100644 --- a/playbooks/openshift-master/private/validate_restart.yml +++ b/playbooks/openshift-master/private/validate_restart.yml @@ -14,9 +14,6 @@ - role: common local_facts: rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" - - role: master - local_facts: - cluster_method: "{{ openshift_master_cluster_method | default(None) }}" # Creating a temp file on localhost, we then check each system that will # be rebooted to see if that file exists, if so we know we're running diff --git a/playbooks/openshift-master/redeploy-certificates.yml b/playbooks/openshift-master/redeploy-certificates.yml new file mode 100644 index 000000000..8b7272485 --- /dev/null +++ b/playbooks/openshift-master/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/redeploy-certificates.yml + +- import_playbook: private/restart.yml diff --git a/playbooks/openshift-master/redeploy-openshift-ca.yml b/playbooks/openshift-master/redeploy-openshift-ca.yml new file mode 100644 index 000000000..27f4e6b7d --- /dev/null +++ b/playbooks/openshift-master/redeploy-openshift-ca.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/redeploy-openshift-ca.yml diff --git a/playbooks/openshift-nfs/private/config.yml b/playbooks/openshift-nfs/private/config.yml index 6ea77e00b..3625efcc6 100644 --- a/playbooks/openshift-nfs/private/config.yml +++ b/playbooks/openshift-nfs/private/config.yml @@ -14,7 +14,6 @@ - name: Configure nfs hosts: oo_nfs_to_config roles: - - role: os_firewall - role: openshift_storage_nfs - name: NFS Install Checkpoint End diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml index 261e2048f..b86cb3cc2 100644 --- a/playbooks/openshift-node/private/additional_config.yml +++ b/playbooks/openshift-node/private/additional_config.yml @@ -33,7 +33,6 @@ roles: - role: flannel etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" when: openshift_use_flannel | default(false) | bool - name: Additional node config diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml b/playbooks/openshift-node/private/certificates-backup.yml index 2ad84b3b9..2ad84b3b9 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml +++ b/playbooks/openshift-node/private/certificates-backup.yml diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml index 06f3df9fa..32b288c8b 100644 --- a/playbooks/openshift-node/private/configure_nodes.yml +++ b/playbooks/openshift-node/private/configure_nodes.yml @@ -4,14 +4,12 @@ vars: openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" 
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" roles: - - role: os_firewall - role: openshift_clock - role: openshift_node - role: tuned diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml index 3c3ac3646..ef07669cb 100644 --- a/playbooks/openshift-node/private/containerized_nodes.yml +++ b/playbooks/openshift-node/private/containerized_nodes.yml @@ -5,7 +5,6 @@ vars: openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config'] | default([]))) @@ -13,7 +12,6 @@ }}" roles: - - role: os_firewall - role: openshift_clock - role: openshift_node openshift_ca_host: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/openshift-node/private/enable_excluders.yml b/playbooks/openshift-node/private/enable_excluders.yml index 5288b14f9..30713e694 100644 --- a/playbooks/openshift-node/private/enable_excluders.yml +++ b/playbooks/openshift-node/private/enable_excluders.yml @@ -5,4 +5,3 @@ roles: - role: openshift_excluder r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml index b7ac27bda..6b517197d 100644 --- a/playbooks/openshift-node/private/image_prep.yml +++ b/playbooks/openshift-node/private/image_prep.yml @@ -1,12 +1,10 @@ --- - name: normalize groups - import_playbook: ../../init/evaluate_groups.yml - -- name: initialize the facts - import_playbook: ../../init/facts.yml - -- name: initialize the repositories - import_playbook: ../../init/repos.yml + import_playbook: ../../prerequisites.yml + vars: + skip_version: True + skip_sanity_checks: True + skip_validate_hostnames: True - name: run node config setup import_playbook: setup.yml diff --git a/playbooks/openshift-node/private/network_manager.yml b/playbooks/openshift-node/private/network_manager.yml index fffed4dfb..39640345f 100644 --- a/playbooks/openshift-node/private/network_manager.yml +++ b/playbooks/openshift-node/private/network_manager.yml @@ -1,6 +1,4 @@ --- -- import_playbook: ../../init/evaluate_groups.yml - - name: Install and configure NetworkManager hosts: oo_all_hosts become: yes @@ -9,6 +7,8 @@ package: name: 'NetworkManager' state: present + register: result + until: result | success - name: configure NetworkManager lineinfile: diff --git a/playbooks/openshift-node/private/redeploy-certificates.yml b/playbooks/openshift-node/private/redeploy-certificates.yml new file mode 100644 index 000000000..c0f75ae80 --- /dev/null +++ b/playbooks/openshift-node/private/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- import_playbook: certificates-backup.yml + 
+- import_playbook: certificates.yml + vars: + openshift_certificates_redeploy: true diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml index c3beb59b7..0786bd7d3 100644 --- a/playbooks/openshift-node/private/restart.yml +++ b/playbooks/openshift-node/private/restart.yml @@ -16,10 +16,6 @@ retries: 3 delay: 30 - - name: Update docker facts - openshift_facts: - role: docker - - name: Restart containerized services service: name: "{{ item }}" @@ -27,9 +23,9 @@ with_items: - etcd_container - openvswitch - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" + - "{{ openshift_service_type }}-master-api" + - "{{ openshift_service_type }}-master-controllers" + - "{{ openshift_service_type }}-node" failed_when: false when: openshift.common.is_containerized | bool @@ -44,7 +40,7 @@ - name: restart node service: - name: "{{ openshift.common.service_type }}-node" + name: "{{ openshift_service_type }}-node" state: restarted - name: Wait for node to be ready diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml index 794c03a67..541913aef 100644 --- a/playbooks/openshift-node/private/setup.yml +++ b/playbooks/openshift-node/private/setup.yml @@ -5,7 +5,6 @@ roles: - role: openshift_excluder r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Evaluate node groups hosts: localhost diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml new file mode 100644 index 000000000..8b7272485 --- /dev/null +++ b/playbooks/openshift-node/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/redeploy-certificates.yml + +- import_playbook: private/restart.yml diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index f3fe13530..d361d6278 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -6,7 +6,7 @@ etc.). The result is an environment ready for OpenShift installation via [openshift-ansible]. We provide everything necessary to be able to install OpenShift on -OpenStack (including the DNS and load balancer servers when +OpenStack (including the load balancer servers when necessary). In addition, we work on providing integration with the OpenStack-native services (storage, lbaas, baremetal as a service, dns, etc.). @@ -24,7 +24,7 @@ The OpenStack release must be Newton (for Red Hat OpenStack this is version 10) or newer. It must also satisfy these requirements: * Heat (Orchestration) must be available -* The deployment image (CentOS 7 or RHEL 7) must be loaded +* The deployment image (CentOS 7.4 or RHEL 7) must be loaded * The deployment flavor must be available to your user - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing - look at @@ -38,18 +38,6 @@ Optional: * External Neutron network with a floating IP address pool -## DNS Requirements - -OpenShift requires DNS to operate properly. OpenStack supports DNS-as-a-service -in the form of the Designate project, but the playbooks here don't support it -yet. Until we do, you will need to provide a DNS solution yourself (or in case -you are not running Designate when we do). - -If your server supports nsupdate, we will use it to add the necessary records. 
- -TODO(shadower): describe how to build a sample DNS server and how to configure -our playbooks for nsupdate. - ## Installation @@ -57,14 +45,13 @@ There are four main parts to the installation: 1. [Preparing Ansible and dependencies](#1-preparing-ansible-and-dependencies) 2. [Configuring the desired OpenStack environment and OpenShift cluster](#2-configuring-the-openstack-environment-and-openshift-cluster) -3. [Creating the OpenStack resources (VMs, networking, etc.)](#3-creating-the-openstack-resources-vms-networking-etc) -4. [Installing OpenShift](#4-installing-openshift) +3. [Creating the OpenStack Resources and Installing OpenShift](#3-creating-the-openstack-resources-and-installing-openshift) This guide is going to install [OpenShift Origin][origin] with [CentOS 7][centos7] images with minimal customisation. -We will create the VMs for running OpenShift, in a new Neutron -network, assign Floating IP addresses and configure DNS. +We will create the VMs for running OpenShift, in a new Neutron network and +assign Floating IP addresses. The OpenShift cluster will have a single Master node that will run `etcd`, a single Infra node and two App nodes. @@ -156,14 +143,6 @@ $ vi inventory/group_vars/all.yml 4. Set the `openshift_openstack_default_flavor` to the flavor you want your OpenShift VMs to use. - See `openstack flavor list` for the list of available flavors. -5. Set the `openshift_openstack_dns_nameservers` to the list of the IP addresses - of the DNS servers used for the **private** address resolution. - -**NOTE ON DNS**: at minimum, the OpenShift nodes need to be able to access each -other by their hostname. OpenStack doesn't provide this by default, so you -need to provide a DNS server. Put the address of that DNS server in -`openshift_openstack_dns_nameservers` variable. - @@ -191,7 +170,7 @@ the [Sample OpenShift Inventory][sample-openshift-inventory] and the [advanced configuration][advanced-configuration]. -### 3. Creating the OpenStack resources (VMs, networking, etc.) +### 3. Creating the OpenStack Resources and Installing OpenShift We provide an `ansible.cfg` file which has some useful defaults -- you should copy it to the directory you're going to run `ansible-playbook` from. @@ -200,13 +179,18 @@ copy it to the directory you're going to run `ansible-playbook` from. $ cp openshift-ansible/ansible.cfg ansible.cfg ``` -Then run the provisioning playbook -- this will create the OpenStack +Then run the provision + install playbook -- this will create the OpenStack resources: ```bash -$ ansible-playbook --user openshift -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision.yaml +$ ansible-playbook --user openshift -i inventory \ + openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \ + -e openshift_repos_enable_testing=true ``` +Note, you may want to use the testing repo for development purposes only. +Normally, `openshift_repos_enable_testing` should not be specified. + If you're using multiple inventories, make sure you pass the path to the right one to `-i`. @@ -214,15 +198,6 @@ If your SSH private key is not in `~/.ssh/id_rsa` use the `--private-key` option to specify the correct path. -### 4. Installing OpenShift - -Run the `byo/config.yml` playbook on top of the OpenStack nodes we have -prepared. 
- -```bash -$ ansible-playbook -i inventory openshift-ansible/playbooks/byo/config.yml -``` - ### Next Steps @@ -240,7 +215,6 @@ advanced configuration: * [External Dns][external-dns] * Multiple Clusters (TODO) * [Cinder Registry][cinder-registry] -* [Bastion Node][bastion] [ansible]: https://www.ansible.com/ @@ -252,11 +226,10 @@ advanced configuration: [hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware [origin]: https://www.openshift.org/ [centos7]: https://www.centos.org/ -[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example +[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example [advanced-configuration]: ./advanced-configuration.md [accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster [uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster [loadbalancer]: ./advanced-configuration.md#multi-master-configuration [external-dns]: ./advanced-configuration.md#dns-configuration-variables [cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry -[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md index 90cc20b98..2c9b70b5f 100644 --- a/playbooks/openstack/advanced-configuration.md +++ b/playbooks/openstack/advanced-configuration.md @@ -23,68 +23,45 @@ There are no additional dependencies for the cluster nodes. Required configuration steps are done by Heat given a specific user data config that normally should not be changed. -## Required galaxy modules - -In order to pull in external dependencies for DNS configuration steps, -the following commads need to be executed: - - ansible-galaxy install \ - -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \ - -p openshift-ansible-contrib/roles - -Alternatively you can install directly from github: - - ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \ - -p openshift-ansible-contrib/roles - -Notes: -* This assumes we're in the directory that contains the clonned -openshift-ansible-contrib repo in its root path. -* When trying to install a different version, the previous one must be removed first -(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)). -Otherwise, even if there are differences between the two versions, installation of the newer version is skipped. +## Accessing the OpenShift Cluster +### Configure DNS -## Accessing the OpenShift Cluster +OpenShift requires a two public DNS records to function fully. The first one points to +the master/load balancer and provides the UI/API access. The other one is a +wildcard domain that resolves app route requests to the infra node. A private DNS +server and records are not required and not managed here. -### Use the Cluster DNS +If you followed the default installation from the README section, there is no +DNS configured. You should add two entries to the `/etc/hosts` file on the +Ansible host (where you to do a quick validation. A real deployment will +however require a DNS server with the following entries set. -In addition to the OpenShift nodes, we created a DNS server with all -the necessary entries. 
We will configure your *Ansible host* to use
-this new DNS and talk to the deployed OpenShift.
+First, run the `openstack server list` command and note the floating IP
+addresses of the *master* and *infra* nodes (we will use `10.40.128.130` for
+master and `10.40.128.134` for infra here).
 
-First, get the DNS IP address:
+Then add the following entries to your `/etc/hosts`:
 
-```bash
-$ openstack server show dns-0.openshift.example.com --format value --column addresses
-openshift-ansible-openshift.example.com-net=192.168.99.11, 10.40.128.129
+```
+10.40.128.130 console.openshift.example.com
+10.40.128.134 cakephp-mysql-example-test.apps.openshift.example.com
 ```
 
-Note the floating IP address (it's `10.40.128.129` in this case) -- if
-you're not sure, try pinging them both -- it's the one that responds
-to pings.
+This points the cluster domain (as defined in the
+`openshift_master_cluster_public_hostname` Ansible variable in `OSEv3`) to the
+master node and any routes for deployed apps to the infra node.
 
-Next, edit your `/etc/resolv.conf` as root and put `nameserver DNS_IP` as your
-**first entry**.
+If you deploy another app, it will end up with a different URL (e.g.
+myapp-test.apps.openshift.example.com) and you will need to add that too. This
+is why a real deployment should always run a DNS server where the second entry
+will be a wildcard `*.apps.openshift.example.com`).
 
-If your `/etc/resolv.conf` currently looks like this:
+This will be sufficient to validate the cluster here.
 
-```
-; generated by /usr/sbin/dhclient-script
-search openstacklocal
-nameserver 192.168.0.3
-nameserver 192.168.0.2
-```
+Take a look at the [External DNS](#dns-configuration-variables) section for
+configuring a DNS service.
 
-Change it to this:
-
-```
-; generated by /usr/sbin/dhclient-script
-search openstacklocal
-nameserver 10.40.128.129
-nameserver 192.168.0.3
-nameserver 192.168.0.2
-```
 
 ### Get the `oc` Client
@@ -182,15 +159,26 @@ So the provisioned cluster nodes will start using those natively as
 default nameservers. Technically, this allows to deploy OpenShift clusters
 without dnsmasq proxies.
 
-The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain` will form the cluster's DNS domain all
-your servers will be under. With the default values, this will be
-`openshift.example.com`. For workloads, the default subdomain is 'apps'.
-That sudomain can be set as well by the `openshift_openstack_app_subdomain` variable in
-the inventory.
+The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain`
+will form the cluster's public DNS domain that all your servers will be under.
+With the default values, this will be `openshift.example.com`. For workloads,
+the default subdomain is 'apps'. That subdomain can be set as well by the
+`openshift_openstack_app_subdomain` variable in the inventory.
+
+If you want to use two sets of hostnames for public and private/prefixed DNS
+records for your externally managed public DNS server, you can specify
+`openshift_openstack_public_hostname_suffix` and/or
+`openshift_openstack_private_hostname_suffix`. The suffixes will be added
+to the nsupdate records sent to the external DNS server. Those are empty by default.
+
+**Note**: the real hostnames (the Nova servers' and Ansible hostnames) and the
+inventory variables will not be updated. The deployment may be done on
+arbitrarily named hosts, with the hostnames managed by cloud-init. Inventory
+hostnames will ignore the suffixes.
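For illustration, a records-only suffix setup might be sketched in `inventory/group_vars/all.yml` as below (the two variable names come from this change; the suffix values are invented examples, not defaults):

```yaml
# Sketch only: these suffixes are appended to the nsupdate records sent to the
# external DNS server. Nova server names, Ansible hostnames and inventory
# hostnames stay unchanged.
openshift_openstack_public_hostname_suffix: "-public"   # example value
openshift_openstack_private_hostname_suffix: "-int"     # example value
```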
The `openstack_<role name>_hostname` is a set of variables used for customising -hostnames of servers with a given role. When such a variable stays commented, -default hostname (usually the role name) is used. +public names of Nova servers provisioned with a given role. When such a variable stays commented, +default value (usually the role name) is used. The `openshift_openstack_dns_nameservers` is a list of DNS servers accessible from all the created Nova servers. These will provide the internal name resolution for @@ -205,7 +193,7 @@ When Network Manager is enabled for provisioned cluster nodes, which is normally the case, you should not change the defaults and always deploy dnsmasq. `openshift_openstack_external_nsupdate_keys` describes an external authoritative DNS server(s) -processing dynamic records updates in the public and private cluster views: +processing dynamic records updates in the public only cluster view: openshift_openstack_external_nsupdate_keys: public: @@ -213,10 +201,6 @@ processing dynamic records updates in the public and private cluster views: key_algorithm: 'hmac-md5' key_name: 'update-key' server: <public DNS server IP> - private: - key_secret: <some nsupdate key 2> - key_algorithm: 'hmac-sha256' - server: <public or private DNS server IP> Here, for the public view section, we specified another key algorithm and optional `key_name`, which normally defaults to the cluster's DNS domain. @@ -224,24 +208,6 @@ This just illustrates a compatibility mode with a DNS service deployed by OpenShift on OSP10 reference architecture, and used in a mixed mode with another external DNS server. -Another example defines an external DNS server for the public view -additionally to the in-stack DNS server used for the private view only: - - openshift_openstack_external_nsupdate_keys: - public: - key_secret: <some nsupdate key> - key_algorithm: 'hmac-sha256' - server: <public DNS server IP> - -Here, updates matching the public view will be hitting the given public -server IP. While updates matching the private view will be sent to the -auto evaluated in-stack DNS server's **public** IP. - -Note, for the in-stack DNS server, private view updates may be sent only -via the public IP of the server. You can not send updates via the private -IP yet. This forces the in-stack private server to have a floating IP. -See also the [security notes](#security-notes) - ## Flannel networking In order to configure the @@ -330,14 +296,6 @@ The `openshift_openstack_required_packages` variable also provides a list of the prerequisite packages to be installed before to deploy an OpenShift cluster. Those are ignored though, if the `manage_packages: False`. -The `openstack_inventory` controls either a static inventory will be created after the -cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory -is yet to be supported, so the static inventory will be created anyway. - -The `openstack_inventory_path` points the directory to host the generated static inventory. -It should point to the copied example inventory directory, otherwise ti creates -a new one for you. - ## Multi-master configuration Please refer to the official documentation for the @@ -347,7 +305,6 @@ variables](https://docs.openshift.com/container-platform/3.6/install_config/inst in `inventory/group_vars/OSEv3.yml`. 
For example, given a load balancer node under the ansible group named `ext_lb`: - openshift_master_cluster_method: native openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}" openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}" @@ -386,18 +343,6 @@ be the case for development environments. When turned off, the servers will be provisioned omitting the ``yum update`` command. This brings security implications though, and is not recommended for production deployments. -### DNS servers security options - -Aside from `openshift_openstack_node_ingress_cidr` restricting public access to in-stack DNS -servers, there are following (bind/named specific) DNS security -options available: - - named_public_recursion: 'no' - named_private_recursion: 'yes' - -External DNS servers, which is not included in the 'dns' hosts group, -are not managed. It is up to you to configure such ones. - ## Configure the OpenShift parameters Finally, you need to update the DNS entry in @@ -409,7 +354,7 @@ installation for example by specifying the authentication. The full list of options is available in this sample inventory: -https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example +https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example Note, that in order to deploy OpenShift origin, you should update the following variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: @@ -540,43 +485,6 @@ You can also run the registry setup playbook directly: -## Configure static inventory and access via a bastion node - -Example inventory variables: - - openshift_openstack_use_bastion: true - openshift_openstack_bastion_ingress_cidr: "{{openshift_openstack_subnet_prefix}}.0/24" - openstack_private_ssh_key: ~/.ssh/id_rsa - openstack_inventory: static - openstack_inventory_path: ../../../../inventory - openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com - -The `openshift_openstack_subnet_prefix` is the openstack private network for your cluster. -And the `openshift_openstack_bastion_ingress_cidr` defines accepted range for SSH connections to nodes -additionally to the `openshift_openstack_ssh_ingress_cidr`` (see the security notes above). - -The SSH config will be stored on the ansible control node by the -gitven path. Ansible uses it automatically. To access the cluster nodes with -that ssh config, use the `-F` prefix, f.e.: - - ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK - -Note, relative paths will not work for the `openstack_ssh_config_path`, but it -works for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this -guide, the latter points to the current directory, where you run ansible commands -from. - -To verify nodes connectivity, use the command: - - ansible -v -i inventory/hosts -m ping all - -If something is broken, double-check the inventory variables, paths and the -generated `<openstack_inventory_path>/hosts` and `openstack_ssh_config_path` files. - -The `inventory: dynamic` can be used instead to access cluster nodes directly via -floating IPs. In this mode you can not use a bastion node and should specify -the dynamic inventory file in your ansible commands , like `-i openstack.py`. 
- ## Using Docker on the Ansible host If you don't want to worry about the dependencies, you can use the @@ -606,28 +514,6 @@ the playbooks: ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml -### Run the playbook - -Assuming your OpenStack (Keystone) credentials are in the `keystonerc` -this is how you stat the provisioning process from your ansible control node: - - . keystonerc - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml - -Note, here you start with an empty inventory. The static inventory will be populated -with data so you can omit providing additional arguments for future ansible commands. - -If bastion enabled, the generates SSH config must be applied for ansible. -Otherwise, it is auto included by the previous step. In order to execute it -as a separate playbook, use the following command: - - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml - -The first infra node then becomes a bastion node as well and proxies access -for future ansible commands. The post-provision step also configures Satellite, -if requested, and DNS server, and ensures other OpenShift requirements to be met. - - ## Running Custom Post-Provision Actions A custom playbook can be run like this: @@ -729,27 +615,12 @@ A library of custom post-provision actions exists in `openshift-ansible-contrib/ Once it succeeds, you can install openshift by running: - ansible-playbook openshift-ansible/playbooks/byo/config.yml + ansible-playbook openshift-ansible/playbooks/deploy_cluster.yml ## Access UI OpenShift UI may be accessed via the 1st master node FQDN, port 8443. -When using a bastion, you may want to make an SSH tunnel from your control node -to access UI on the `https://localhost:8443`, with this inventory variable: - - openshift_openstack_ui_ssh_tunnel: True - -Note, this requires sudo rights on the ansible control node and an absolute path -for the `openstack_private_ssh_key`. You should also update the control node's -`/etc/hosts`: - - 127.0.0.1 master-0.openshift.example.com - -In order to access UI, the ssh-tunnel service will be created and started on the -control node. Make sure to remove these changes and the service manually, when not -needed anymore. - ## Scale Deployment up/down ### Scaling up @@ -768,5 +639,3 @@ Usage: ``` ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>] ``` - -Note: This playbook works only without a bastion node (`openshift_openstack_use_bastion: False`). diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml index 1c4f609e3..3211f619a 100644 --- a/playbooks/openstack/openshift-cluster/install.yml +++ b/playbooks/openstack/openshift-cluster/install.yml @@ -8,8 +8,5 @@ # values here. We do it in the OSEv3 group vars. Do we need to add # some logic here? 
-- name: run the initialization - include: ../../init/main.yml - -- name: run the config - include: ../../common/openshift-cluster/config.yml +- name: run the cluster deploy + import_playbook: ../../deploy_cluster.yml diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml index 36d8c8215..583e72b51 100644 --- a/playbooks/openstack/openshift-cluster/provision.yml +++ b/playbooks/openstack/openshift-cluster/provision.yml @@ -10,7 +10,7 @@ # NOTE(shadower): Bring in the host groups: - name: evaluate groups - include: ../../init/evaluate_groups.yml + import_playbook: ../../init/evaluate_groups.yml - name: Wait for the nodes and gather their facts @@ -26,9 +26,9 @@ - name: Gather facts for the new nodes setup: +- name: set common facts + import_playbook: ../../init/facts.yml -# NOTE(shadower): the (internal) DNS must be functional at this point!! -# That will have happened in provision.yml if nsupdate was configured. # TODO(shadower): consider splitting this up so people can stop here # and configure their DNS if they have to. @@ -47,6 +47,13 @@ hosts: oo_all_hosts become: yes gather_facts: yes + roles: + - role: rhel_subscribe + when: + - ansible_distribution == "RedHat" + - rhsub_user | default(False) + - rhsub_pass | default(False) + tasks: - name: Install dependencies include_role: diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml index 5d88c105f..fc2854605 100644 --- a/playbooks/openstack/openshift-cluster/provision_install.yml +++ b/playbooks/openstack/openshift-cluster/provision_install.yml @@ -1,9 +1,9 @@ --- - name: Check the prerequisites for cluster provisioning in OpenStack - include: prerequisites.yml + import_playbook: prerequisites.yml - name: Include the provision.yml playbook to create cluster - include: provision.yml + import_playbook: provision.yml - name: Include the install.yml playbook to install cluster - include: install.yml + import_playbook: install.yml diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml index 1e55adb9e..933117127 100644 --- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -1,12 +1,12 @@ --- +## Openshift product versions and repos to install from openshift_deployment_type: origin +#openshift_repos_enable_testing: true #openshift_deployment_type: openshift-enterprise #openshift_release: v3.5 openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" -openshift_master_cluster_method: native -openshift_master_cluster_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" -openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_hostname }}" +openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" osm_default_node_selector: 'region=primary' diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml index 921edb867..c7afe9a24 100644 --- a/playbooks/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/openstack/sample-inventory/group_vars/all.yml @@ -10,7 +10,6 @@ openshift_openstack_dns_nameservers: [] #openshift_openstack_node_hostname: "app-node" #openshift_openstack_lb_hostname: 
"lb" #openshift_openstack_etcd_hostname: "etcd" -#openshift_openstack_dns_hostname: "dns" openshift_openstack_keypair_name: "openshift" openshift_openstack_external_network_name: "public" @@ -34,7 +33,6 @@ openshift_openstack_external_network_name: "public" #openshift_openstack_node_image_name: "centos7" #openshift_openstack_lb_image_name: "centos7" #openshift_openstack_etcd_image_name: "centos7" -#openshift_openstack_dns_image_name: "centos7" openshift_openstack_default_image_name: "centos7" openshift_openstack_num_masters: 1 @@ -49,7 +47,6 @@ openshift_openstack_num_nodes: 2 #openshift_openstack_node_flavor: "m1.medium" #openshift_openstack_lb_flavor: "m1.medium" #openshift_openstack_etcd_flavor: "m1.medium" -#openshift_openstack_dns_flavor: "m1.medium" openshift_openstack_default_flavor: "m1.medium" # # Numerical index of nodes to remove @@ -62,7 +59,6 @@ openshift_openstack_default_flavor: "m1.medium" #openshift_openstack_docker_infra_volume_size: "15" #openshift_openstack_docker_node_volume_size: "15" #openshift_openstack_docker_etcd_volume_size: "2" -#openshift_openstack_docker_dns_volume_size: "1" #openshift_openstack_docker_lb_volume_size: "5" openshift_openstack_docker_volume_size: "15" @@ -86,31 +82,13 @@ openshift_openstack_docker_volume_size: "15" openshift_openstack_subnet_prefix: "192.168.99" -## Red Hat subscription defaults to false which means we will not attempt to -## subscribe the nodes -#rhsm_register: False - -# # Using Red Hat Satellite: -#rhsm_register: True -#rhsm_satellite: 'sat-6.example.com' -#rhsm_org: 'OPENSHIFT_ORG' -#rhsm_activationkey: '<activation-key>' - -# # Or using RHN username, password and optionally pool: -#rhsm_register: True -#rhsm_username: '<username>' -#rhsm_password: '<password>' -#rhsm_pool: '<pool id>' - -#rhsm_repos: -# - "rhel-7-server-rpms" -# - "rhel-7-server-ose-3.5-rpms" -# - "rhel-7-server-extras-rpms" -# - "rhel-7-fast-datapath-rpms" +## Red Hat subscription: +#rhsub_user: '<username>' +#rhsub_pass: '<password>' +#rhsub_pool: '<pool name>' # # Roll-your-own DNS -#openshift_openstack_num_dns: 0 #openshift_openstack_external_nsupdate_keys: # public: # key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' @@ -121,10 +99,6 @@ openshift_openstack_subnet_prefix: "192.168.99" # key_algorithm: 'hmac-md5' # server: '192.168.1.2' -# # Customize DNS server security options -#named_public_recursion: 'no' -#named_private_recursion: 'yes' - # NOTE(shadower): Do not change this value. The Ansible user is currently # hardcoded to `openshift`. diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py index 47c56d94d..ad3fd936b 100755 --- a/playbooks/openstack/sample-inventory/inventory.py +++ b/playbooks/openstack/sample-inventory/inventory.py @@ -79,10 +79,19 @@ def build_inventory(): public_v4 = server.public_v4 or server.private_v4 if public_v4: - hostvars['public_v4'] = public_v4 + hostvars['public_v4'] = server.public_v4 + hostvars['openshift_public_ip'] = server.public_v4 # TODO(shadower): what about multiple networks? if server.private_v4: hostvars['private_v4'] = server.private_v4 + # NOTE(shadower): Yes, we set both hostname and IP to the private + # IP address for each node. OpenStack doesn't resolve nodes by + # name at all, so using a hostname here would require an internal + # DNS which would complicate the setup and potentially introduce + # performance issues. 
+ hostvars['openshift_ip'] = server.private_v4 + hostvars['openshift_hostname'] = server.private_v4 + hostvars['openshift_public_hostname'] = server.name node_labels = server.metadata.get('node_labels') if node_labels: diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml index 582dfe794..7b7868cfe 100644 --- a/playbooks/prerequisites.yml +++ b/playbooks/prerequisites.yml @@ -1,7 +1,12 @@ --- -- name: Place holder for prerequisites - hosts: localhost - gather_facts: false - tasks: - - name: Debug placeholder - debug: msg="Prerequisites ran." +- import_playbook: init/main.yml + vars: + skip_verison: True + +# This is required for container runtime for crio, only needs to run once. +- name: Configure os_firewall + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config + roles: + - role: os_firewall + +- import_playbook: container-runtime/private/config.yml diff --git a/playbooks/redeploy-certificates.yml b/playbooks/redeploy-certificates.yml new file mode 100644 index 000000000..b5fcb951d --- /dev/null +++ b/playbooks/redeploy-certificates.yml @@ -0,0 +1,26 @@ +--- +- import_playbook: init/main.yml + +- import_playbook: openshift-etcd/private/redeploy-certificates.yml + +- import_playbook: openshift-master/private/redeploy-certificates.yml + +- import_playbook: openshift-node/private/redeploy-certificates.yml + +- import_playbook: openshift-etcd/private/restart.yml + vars: + g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" + +- import_playbook: openshift-master/private/restart.yml + +- import_playbook: openshift-node/private/restart.yml + +- import_playbook: openshift-hosted/private/redeploy-router-certificates.yml + when: openshift_hosted_manage_router | default(true) | bool + +- import_playbook: openshift-hosted/private/redeploy-registry-certificates.yml + when: openshift_hosted_manage_registry | default(true) | bool + +- import_playbook: openshift-master/private/revert-client-ca.yml + +- import_playbook: openshift-master/private/restart.yml diff --git a/playbooks/roles b/playbooks/roles new file mode 120000 index 000000000..d8c4472ca --- /dev/null +++ b/playbooks/roles @@ -0,0 +1 @@ +../roles
\ No newline at end of file
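As a rough orientation for the entry points touched above, the new top-level playbooks can be chained with the same `import_playbook` pattern the patch itself uses; the wrapper below is a hypothetical local file kept next to an openshift-ansible checkout, not something shipped by this change:

```yaml
---
# Hypothetical wrapper playbook: chains the refactored top-level entry points
# in their intended order (host preparation first, then the cluster deploy).
- import_playbook: openshift-ansible/playbooks/prerequisites.yml

- import_playbook: openshift-ansible/playbooks/deploy_cluster.yml
```

Certificate rotation stays separate and would be an on-demand run of the new `playbooks/redeploy-certificates.yml` playbook.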