Diffstat (limited to 'playbooks'): 119 files changed, 1335 insertions, 1773 deletions
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 07f10d48c..5ed55a817 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -151,6 +151,14 @@ - lbr0 - vlinuxbr - vovsbr + + - name: Remove virtual devices + command: nmcli delete device "{{ item }}" + failed_when: False + with_items: + - tun0 + - docker0 + when: openshift_remove_all | default(true) | bool - shell: atomic uninstall "{{ item }}"-master-api @@ -264,12 +272,30 @@ - "{{ directories.results | default([]) }}" - files + - shell: systemctl daemon-reload + changed_when: False + + - name: restart container-engine + service: name=container-engine state=stopped enabled=no + failed_when: false + register: container_engine + + - name: restart docker + service: name=docker state=stopped enabled=no + failed_when: false + when: not (container_engine | changed) + register: l_docker_restart_docker_in_pb_result + until: not l_docker_restart_docker_in_pb_result | failed + retries: 3 + delay: 30 + - name: Remove remaining files file: path={{ item }} state=absent with_items: - /etc/ansible/facts.d/openshift.fact - /etc/openshift - /etc/openshift-sdn + - /etc/pki/ca-trust/source/anchors/openshift-ca.crt - /etc/sysconfig/atomic-openshift-node - /etc/sysconfig/atomic-openshift-node-dep - /etc/sysconfig/openshift-node-dep @@ -284,23 +310,38 @@ - /etc/systemd/system/origin-node-dep.service - /etc/systemd/system/origin-node.service - /etc/systemd/system/origin-node.service.wants + - /var/lib/docker + + - name: Rebuild ca-trust + command: update-ca-trust + + - name: Reset Docker proxy configuration + lineinfile: + state=absent + dest=/etc/sysconfig/docker + regexp='(NO_PROXY|HTTP_PROXY|HTTPS_PROXY)=.*' + + - name: Reset Docker registry configuration + lineinfile: + state=absent + dest=/etc/sysconfig/docker + regexp='(ADD_REGISTRY|BLOCK_REGISTRY|INSECURE_REGISTRY)=.*' + + - name: Detect Docker storage configuration + shell: vgs -o name | grep docker + register: docker_vg_name + failed_when: false + changed_when: false - - shell: systemctl daemon-reload - changed_when: False + - name: Wipe out Docker storage contents + command: vgremove -f {{ item }} + with_items: "{{ docker_vg_name.stdout_lines }}" + when: docker_vg_name.rc == 0 - - name: restart container-engine - service: name=container-engine state=restarted - failed_when: false - register: container_engine + - name: Wipe out Docker storage configuration + file: path=/etc/sysconfig/docker-storage state=absent + when: docker_vg_name.rc == 0 - - name: restart docker - service: name=docker state=restarted - failed_when: false - when: not (container_engine | changed) - register: l_docker_restart_docker_in_pb_result - until: not l_docker_restart_docker_in_pb_result | failed - retries: 3 - delay: 30 - hosts: masters become: yes @@ -525,3 +566,7 @@ with_items: - /etc/ansible/facts.d/openshift.fact - /var/lib/haproxy/stats + # Here we remove only limits.conf rather than directory, as users may put their files. + # - /etc/systemd/system/haproxy.service.d + - /etc/systemd/system/haproxy.service.d/limits.conf + - /etc/systemd/system/haproxy.service diff --git a/playbooks/aws/BUILD_AMI.md b/playbooks/aws/BUILD_AMI.md new file mode 100644 index 000000000..468264a9a --- /dev/null +++ b/playbooks/aws/BUILD_AMI.md @@ -0,0 +1,21 @@ +# Build AMI + +When seeking to deploy a working openshift cluster using these plays, a few +items must be in place. + +These are: + +1. Create an instance, using a specified ssh key. +2. 
Run openshift-ansible setup roles to ensure packages and services are correctly configured. +3. Create the AMI. +4. If encryption is desired + - A KMS key is created with the name of $clusterid + - An encrypted AMI will be produced with $clusterid KMS key +5. Terminate the instance used to configure the AMI. + +More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption: +``` +# openshift_aws_ami_encrypt: True # defaults to false +``` + +**Note**: This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date. diff --git a/playbooks/aws/PREREQUISITES.md b/playbooks/aws/PREREQUISITES.md new file mode 100644 index 000000000..4f428dcc3 --- /dev/null +++ b/playbooks/aws/PREREQUISITES.md @@ -0,0 +1,40 @@ +# Prerequisites + +When seeking to deploy a working openshift cluster using these plays, a few +items must be in place. + +These are: + +1) vpc +2) security group to build the AMI in. +3) ssh keys to log into instances + +These items can be provisioned ahead of time, or you can utilize the plays here +to create these items. + +If you wish to provision these items yourself, or you already have these items +provisioned and wish to utilize existing components, please refer to +provisioning_vars.yml.example. + +If you wish to have these items created for you, continue with this document. + +# Running prerequisites.yml + +Warning: Running these plays will provision items in your AWS account (if not +present), and you may incur billing charges. These plays are not suitable +for the free-tier. + +## Step 1: +Ensure you have specified all the necessary provisioning variables. See +provisioning_vars.example.yml and README.md for more information. + +## Step 2: +``` +$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml +``` + +This will create a VPC, security group, and ssh_key. These plays are idempotent, +and multiple runs should result in no additional provisioning of these components. + +You can also verify that you will successfully utilize existing components with +these plays. diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index 816cb35b4..417fb539a 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -8,6 +8,13 @@ With recent desire for provisioning from customers and developers alike, the AWS deploy highly scalable Openshift clusters utilizing AWS auto scale groups and custom AMIs. +To speed in the provisioning of medium and large clusters, openshift-node +instances are created using a pre-built AMI. A list of pre-built AMIs will +be available soon. + +If the deployer wishes to build their own AMI for provisioning, instructions +to do so are provided here. + ### Where do I start? Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways: @@ -31,8 +38,13 @@ Before any provisioning may occur, AWS account credentials must be present in th ### Let's Provision! -The newly added playbooks are the following: -- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain openshift repositories. 
+Warning: Running these plays will provision items in your AWS account (if not +present), and you may incur billing charges. These plays are not suitable +for the free-tier. + +#### High-level overview +- prerequisites.yml - Provision VPC, Security Groups, SSH keys, if needed. See PREREQUISITES.md for more information. +- build_ami.yml - Builds a custom AMI. See BUILD_AMI.md for more information. - provision.yml - Create a vpc, elbs, security groups, launch config, asg's, etc. - install.yml - Calls the openshift-ansible installer on the newly created instances - provision_nodes.yml - Creates the infra and compute node scale groups @@ -41,82 +53,39 @@ The newly added playbooks are the following: The current expected work flow should be to provide an AMI with access to Openshift repositories. There should be a repository specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS custom account settings. +Values specified in provisioning_vars.yml may instead be specified in your inventory group_vars +under the appropriate groups. Most variables can exist in the 'all' group. + ```yaml --- -# when creating an AMI set this to True -# when installing a cluster set this to False -openshift_node_bootstrap: True - -# specify a clusterid -# openshift_aws_clusterid: default - -# specify a region -# openshift_aws_region: us-east-1 - -# must specify a base_ami when building an AMI -# openshift_aws_base_ami: # base image for AMI to build from -# specify when using a custom AMI -# openshift_aws_ami: - -# when creating an encrypted AMI please specify use_encryption -# openshift_aws_ami_encrypt: False - -# custom certificates are required for the ELB -# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt' -# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key' -# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt' - -# This is required for any ec2 instances -# openshift_aws_ssh_key_name: myuser_key - -# This will ensure these users are created -#openshift_aws_users: -#- key_name: myuser_key -# username: myuser -# pub_key: | -# ssh-rsa AAAA +# Minimum mandatory provisioning variables. See provisioning_vars.yml.example. +# for more information. +openshift_deployment_type: # 'origin' or 'openshift-enterprise' +openshift_release: # example: v3.7 +openshift_pkg_version: # example: -3.7.0 +openshift_aws_ssh_key_name: # example: myuser_key +openshift_aws_base_ami: # example: ami-12345678 +# These are required when doing SSL on the ELBs +openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt' +openshift_aws_iam_cert_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key' ``` If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`. -In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. 
Here is an example: - -```ini -[OSEv3:children] -masters -nodes -etcd - -[OSEv3:vars] -################################################################################ -# Ensure these variables are set for bootstrap -################################################################################ -# openshift_deployment_type is required for installation -openshift_deployment_type=origin +In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as deploying a cluster +using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example. -# required when building an AMI. This will -# be dependent on the version provided by the yum repository -openshift_pkg_version=-3.6.0 - -openshift_master_bootstrap_enabled=True - -openshift_hosted_router_wait=False -openshift_hosted_registry_wait=False - -# Repository for installation -openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}] - -################################################################################ -# cluster specific settings maybe be placed here +There are more examples of cluster inventory settings [`here`](../../inventory/byo/). -[masters] +#### Step 0 (optional) -[etcd] +You may provision a VPC, Security Group, and SSH keypair to build the AMI. -[nodes] +``` +$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml ``` -There are more examples of cluster inventory settings [`here`](../../inventory/byo/). +See PREREQUISITES.md for more information. #### Step 1 @@ -126,24 +95,6 @@ Once the `inventory` and the `provisioning_vars.yml` file has been updated with $ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml ``` -1. This script will build a VPC. Default name will be clusterid if not specified. -2. Create an ssh key required for the instance. -3. Create a security group. -4. Create an instance using the key from step 2 or a specified key. -5. Run openshift-ansible setup roles to ensure packages and services are correctly configured. -6. Create the AMI. -7. If encryption is desired - - A KMS key is created with the name of $clusterid - - An encrypted AMI will be produced with $clusterid KMS key -8. Terminate the instance used to configure the AMI. - -More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption: -``` -# openshift_aws_ami_encrypt: True # defaults to false -``` - -**Note**: This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date. 
- #### Step 2 Now that we have created an AMI for our Openshift installation, there are two ways to use the AMI. @@ -167,16 +118,14 @@ $ ansible-playbook provision.yml -e @provisioning_vars.yml ``` This playbook runs through the following steps: -1. Ensures a VPC is created. -2. Ensures a SSH key exists. -3. Creates an s3 bucket for the registry named $clusterid-docker-registry -4. Create master security groups. -5. Create a master launch config. -6. Create the master auto scaling groups. -7. If certificates are desired for ELB, they will be uploaded. -8. Create internal and external master ELBs. -9. Add newly created masters to the correct groups. -10. Set a couple of important facts for the masters. +1. Creates an s3 bucket for the registry named $clusterid-docker-registry +2. Create master security groups. +3. Create a master launch config. +4. Create the master auto scaling groups. +5. If certificates are desired for ELB, they will be uploaded. +6. Create internal and external master ELBs. +7. Add newly created masters to the correct groups. +8. Set a couple of important facts for the masters. At this point we have successfully created the infrastructure including the master nodes. @@ -195,13 +144,13 @@ Once this playbook completes, the cluster masters should be installed and config #### Step 5 -Now that we have a cluster deployed it will be more interesting to create some node types. This can be done easily with the following playbook: +Now that we have the cluster masters deployed, we need to deploy our infrastructure and compute nodes: ``` $ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml ``` -Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator. +Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator in Step 6. 
#### Step 6 diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml index ffc367f9f..c2c8bea50 100755 --- a/playbooks/aws/openshift-cluster/accept.yml +++ b/playbooks/aws/openshift-cluster/accept.yml @@ -42,12 +42,12 @@ until: "'instances' in instancesout and instancesout.instances|length > 0" - debug: - msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}" + msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" - name: approve nodes oc_adm_csr: #approve_all: True - nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}" - timeout: 0 + nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" + timeout: 60 register: nodeout delegate_to: "{{ mastersout.instances[0].public_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml index 1ab1e8041..fae30eb0a 100644 --- a/playbooks/aws/openshift-cluster/build_ami.yml +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -17,14 +17,6 @@ - name: openshift_aws_region msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" -- include: provision_vpc.yml - -- include: provision_ssh_keypair.yml - -- include: provision_sec_group.yml - vars: - openshift_aws_node_group_type: compute - - include: provision_instance.yml vars: openshift_aws_node_group_type: compute @@ -34,7 +26,10 @@ tasks: - name: set the user to perform installation set_fact: - ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}" + ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}" + openshift_node_bootstrap: True + openshift_node_image_prep_packages: + - cloud-utils-growpart # This is the part that installs all of the software and configs for the instance # to become a node. 
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml new file mode 100644 index 000000000..db6e3b8e1 --- /dev/null +++ b/playbooks/aws/openshift-cluster/hosted.yml @@ -0,0 +1,22 @@ +--- +- include: ../../common/openshift-cluster/openshift_hosted.yml + +- include: ../../common/openshift-cluster/openshift_metrics.yml + when: openshift_metrics_install_metrics | default(false) | bool + +- include: ../../common/openshift-cluster/openshift_logging.yml + when: openshift_logging_install_logging | default(false) | bool + +- include: ../../common/openshift-cluster/service_catalog.yml + when: openshift_enable_service_catalog | default(false) | bool + +- include: ../../common/openshift-management/config.yml + when: openshift_management_install_management | default(false) | bool + +- name: Print deprecated variable warning message if necessary + hosts: oo_first_master + gather_facts: no + tasks: + - debug: msg="{{__deprecation_message}}" + when: + - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml index 4d0bf9531..1e8118490 100644 --- a/playbooks/aws/openshift-cluster/install.yml +++ b/playbooks/aws/openshift-cluster/install.yml @@ -21,5 +21,29 @@ - name: run the std_include include: ../../common/openshift-cluster/std_include.yml -- name: run the config - include: ../../common/openshift-cluster/config.yml +- name: perform the installer openshift-checks + include: ../../common/openshift-checks/install.yml + +- name: etcd install + include: ../../common/openshift-etcd/config.yml + +- name: include nfs + include: ../../common/openshift-nfs/config.yml + when: groups.oo_nfs_to_config | default([]) | count > 0 + +- name: include loadbalancer + include: ../../common/openshift-loadbalancer/config.yml + when: groups.oo_lb_to_config | default([]) | count > 0 + +- name: include openshift-master config + include: ../../common/openshift-master/config.yml + +- name: include master additional config + include: ../../common/openshift-master/additional_config.yml + +- name: include master additional config + include: ../../common/openshift-node/config.yml + +- name: include openshift-glusterfs + include: ../../common/openshift-glusterfs/config.yml + when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml new file mode 100644 index 000000000..f5eb01b14 --- /dev/null +++ b/playbooks/aws/openshift-cluster/prerequisites.yml @@ -0,0 +1,6 @@ +--- +- include: provision_vpc.yml + +- include: provision_ssh_keypair.yml + +- include: provision_sec_group.yml diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml index e787deced..78dd6a49b 100644 --- a/playbooks/aws/openshift-cluster/provision_install.yml +++ b/playbooks/aws/openshift-cluster/provision_install.yml @@ -6,11 +6,14 @@ - name: Include the provision.yml playbook to create cluster include: provision.yml -- name: Include the install.yml playbook to install cluster +- name: Include the install.yml playbook to install cluster on masters include: install.yml -- name: Include the install.yml playbook to install cluster +- name: provision the infra/compute playbook to install node resources include: provision_nodes.yml - name: Include the accept.yml playbook to accept nodes into the cluster include: accept.yml + +- name: Include the 
hosted.yml playbook to finish the hosted configuration + include: hosted.yml diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml index 039357adb..7d74a691a 100644 --- a/playbooks/aws/openshift-cluster/provision_sec_group.yml +++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml @@ -6,7 +6,7 @@ connection: local gather_facts: no tasks: - - name: create an instance and prepare for ami + - name: create security groups include_role: name: openshift_aws tasks_from: security_group.yml diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml deleted file mode 100644 index 28eb9c993..000000000 --- a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# when creating an AMI set this option to True -# when installing the cluster, set this to False -openshift_node_bootstrap: True - -# specify a clusterid -#openshift_aws_clusterid: default - -# must specify a base_ami when building an AMI -#openshift_aws_base_ami: - -# when creating an encrypted AMI please specify use_encryption -#openshift_aws_ami_encrypt: False - -# custom certificates are required for the ELB -#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt' -#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key' -#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt' - -# This is required for any ec2 instances -#openshift_aws_ssh_key_name: myuser_key - -# This will ensure these users are created -#openshift_aws_users: -#- key_name: myuser_key -# username: myuser -# pub_key: | -# ssh-rsa AAAA diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini new file mode 100644 index 000000000..238a7eb2f --- /dev/null +++ b/playbooks/aws/provisioning-inventory.example.ini @@ -0,0 +1,25 @@ +[OSEv3:children] +masters +nodes +etcd + +[OSEv3:vars] +################################################################################ +# Ensure these variables are set for bootstrap +################################################################################ +# openshift_deployment_type is required for installation +openshift_deployment_type=origin + +openshift_master_bootstrap_enabled=True + +openshift_hosted_router_wait=False +openshift_hosted_registry_wait=False + +################################################################################ +# cluster specific settings maybe be placed here + +[masters] + +[etcd] + +[nodes] diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example new file mode 100644 index 000000000..1491fb868 --- /dev/null +++ b/playbooks/aws/provisioning_vars.yml.example @@ -0,0 +1,120 @@ +--- +# Variables that are commented in this file are optional; uncommented variables +# are mandatory. + +# Default values for each variable are provided, as applicable. +# Example values for mandatory variables are provided as a comment at the end +# of the line. + +# ------------------------ # +# Common/Cluster Variables # +# ------------------------ # +# Variables in this section affect all areas of the cluster + +# Deployment type must be specified. +openshift_deployment_type: # 'origin' or 'openshift-enterprise' + +# openshift_release must be specified. Use whatever version of openshift +# that is supported by openshift-ansible that you wish. 
+openshift_release: # v3.7 + +# This will be dependent on the version provided by the yum repository +openshift_pkg_version: # -3.7.0 + +# specify a clusterid +# This value is also used as the default value for many other components. +#openshift_aws_clusterid: default + +# AWS region +# This value will instruct the plays where all items should be created. +# Multi-region deployments are not supported using these plays at this time. +#openshift_aws_region: us-east-1 + +#openshift_aws_create_launch_config: true +#openshift_aws_create_scale_group: true + +# --- # +# VPC # +# --- # + +# openshift_aws_create_vpc defaults to true. If you don't wish to provision +# a vpc, set this to false. +#openshift_aws_create_vpc: true + +# Name of the vpc. Needs to be set if using a pre-existing vpc. +#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" + +# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing +# vpc + subnet. +#openshift_aws_subnet_name: + +# -------------- # +# Security Group # +# -------------- # + +# openshift_aws_create_security_groups defaults to true. If you wish to use +# an existing security group, set this to false. +#openshift_aws_create_security_groups: true + +# openshift_aws_build_ami_group is the name of the security group to build the +# ami in. This defaults to the value of openshift_aws_clusterid. +#openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" + +# openshift_aws_launch_config_security_groups specifies the security groups to +# apply to the launch config. The launch config security groups will be what +# the cluster actually is deployed in. +#openshift_aws_launch_config_security_groups: see roles/openshift_aws/defaults.yml + +# openshift_aws_node_security_groups are created when +# openshift_aws_create_security_groups is set to true. +#openshift_aws_node_security_groups: see roles/openshift_aws/defaults.yml + +# -------- # +# ssh keys # +# -------- # + +# Specify the key pair name here to connect to the provisioned instances. This +# can be an existing key, or it can be one of the keys specified in +# openshift_aws_users +openshift_aws_ssh_key_name: # myuser_key + +# This will ensure these user and public keys are created. +#openshift_aws_users: +#- key_name: myuser_key +# username: myuser +# pub_key: | +# ssh-rsa AAAA + +# When building the AMI, specify the user to ssh to the instance as. +# openshift_aws_build_ami_ssh_user: root + +# --------- # +# AMI Build # +# --------- # +# Variables in this section apply to building a node AMI for use in your +# openshift cluster. + +# must specify a base_ami when building an AMI +openshift_aws_base_ami: # ami-12345678 + +# when creating an encrypted AMI please specify use_encryption +#openshift_aws_ami_encrypt: False + +# -- # +# S3 # +# -- # + +# Create an s3 bucket. +#openshift_aws_create_s3: True + +# --- # +# ELB # +# --- # + +# openshift_aws_elb_name will be the base-name of the ELBs. 
+#openshift_aws_elb_name: "{{ openshift_aws_clusterid }}" + +# custom certificates are required for the ELB +openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt' +openshift_aws_iam_cert_key_path: # '/path/to/wildcard.<clusterid>.example.com.key' +openshift_aws_iam_cert_chain_path: # '/path/to/cert.ca.crt' diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 60fa44c5b..f2e52782b 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -8,5 +8,3 @@ - always - include: ../../common/openshift-cluster/config.yml - vars: - openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index 255b0dbf7..f53d34145 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -42,3 +42,7 @@ - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml when: openshift_hosted_manage_registry | default(true) | bool + +- include: ../../common/openshift-master/revert-client-ca.yml + +- include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md index 0f64f40f3..d9b1fc2ca 100644 --- a/playbooks/byo/openshift-cluster/upgrades/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/README.md @@ -4,6 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are provided in their respective directories. # Upgrades available +- [OpenShift Container Platform 3.6 to 3.7](v3_7/README.md) (works also to upgrade OpenShift Origin from 3.6.x to 3.7.x) - [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x) -- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x) -- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x) diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml deleted file mode 100644 index 697a18c4d..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml deleted file mode 100644 index 4d284c279..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
-# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md deleted file mode 100644 index 85b807dc6..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# v3.4 Major and Minor Upgrade Playbook - -## Overview -This playbook currently performs the -following steps. - - * Upgrade and restart master services - * Unschedule node. - * Upgrade and restart docker - * Upgrade and restart node services - * Modifies the subset of the configuration necessary - * Applies the latest cluster policies - * Updates the default router if one exists - * Updates the default registry if one exists - * Updates image streams and quickstarts - -## Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml deleted file mode 100644 index 8cce91b3f..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml deleted file mode 100644 index 8e5d0f5f9..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml deleted file mode 100644 index d5329b858..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md deleted file mode 100644 index 53eebe65e..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# v3.5 Major and Minor Upgrade Playbook - -## Overview -This playbook currently performs the -following steps. - - * Upgrade and restart master services - * Unschedule node. 
- * Upgrade and restart docker - * Upgrade and restart node services - * Modifies the subset of the configuration necessary - * Applies the latest cluster policies - * Updates the default router if one exists - * Updates the default registry if one exists - * Updates image streams and quickstarts - -## Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml deleted file mode 100644 index 5b3f6ab06..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md index 4bf53be81..914e0f5b2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md @@ -1,4 +1,4 @@ -# v3.6 Major and Minor Upgrade Playbook +# v3.7 Major and Minor Upgrade Playbook ## Overview This playbook currently performs the following steps. diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md index 6892f6324..d9be6ae3b 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md @@ -1,11 +1,10 @@ -# v3.3 Major and Minor Upgrade Playbook +# v3.6 Major and Minor Upgrade Playbook ## Overview -This playbook currently performs the -following steps. +This playbook currently performs the following steps. * Upgrade and restart master services - * Unschedule node. + * Unschedule node * Upgrade and restart docker * Upgrade and restart node services * Modifies the subset of the configuration necessary @@ -15,4 +14,7 @@ following steps. 
* Updates image streams and quickstarts ## Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml index f44d55ad2..3d4e6a790 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -4,4 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index 2377713fa..d83305119 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -13,4 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index 180a2821f..a972bb7a6 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -6,4 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-etcd/embedded2external.yml b/playbooks/byo/openshift-etcd/embedded2external.yml new file mode 100644 index 000000000..6690a7624 --- /dev/null +++ b/playbooks/byo/openshift-etcd/embedded2external.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-etcd/embedded2external.yml diff --git a/playbooks/byo/openshift-management/add_container_provider.yml b/playbooks/byo/openshift-management/add_container_provider.yml new file mode 100644 index 000000000..3378b5abd --- /dev/null +++ b/playbooks/byo/openshift-management/add_container_provider.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/evaluate_groups.yml + +- include: ../../common/openshift-management/add_container_provider.yml diff --git a/playbooks/byo/openshift-management/add_many_container_providers.yml b/playbooks/byo/openshift-management/add_many_container_providers.yml new file mode 100644 index 000000000..62fdb11c5 --- /dev/null +++ b/playbooks/byo/openshift-management/add_many_container_providers.yml @@ -0,0 +1,36 @@ +--- +- hosts: localhost + tasks: + - name: Ensure the container provider configuration is defined + assert: + that: container_providers_config is defined + msg: | + Error: Must provide providers config path. 
Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command + + - name: Include providers/management configuration + include_vars: + file: "{{ container_providers_config }}" + + - name: Ensure this cluster is a container provider + uri: + url: "https://{{ management_server['hostname'] }}/api/providers" + body_format: json + method: POST + user: "{{ management_server['user'] }}" + password: "{{ management_server['password'] }}" + validate_certs: no + # Docs on formatting the BODY of the POST request: + # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations + body: "{{ item }}" + failed_when: false + with_items: "{{ container_providers }}" + register: results + + # Include openshift_management for access to filter_plugins. + - include_role: + name: openshift_management + tasks_from: noop + + - name: print each result + debug: + msg: "{{ results.results | oo_filter_container_providers }}" diff --git a/playbooks/byo/openshift-management/config.yml b/playbooks/byo/openshift-management/config.yml index 33a555cc1..209c66502 100644 --- a/playbooks/byo/openshift-management/config.yml +++ b/playbooks/byo/openshift-management/config.yml @@ -1,8 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml - tags: - - always -- include: ../../common/openshift-cluster/evaluate_groups.yml +- include: ../../common/openshift-cluster/std_include.yml - include: ../../common/openshift-management/config.yml diff --git a/playbooks/byo/openshift-management/roles b/playbooks/byo/openshift-management/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-management/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml index ebd6fb261..e95c1c88a 100644 --- a/playbooks/byo/openshift-management/uninstall.yml +++ b/playbooks/byo/openshift-management/uninstall.yml @@ -1,6 +1,2 @@ --- -# - include: ../openshift-cluster/initialize_groups.yml -# tags: -# - always - - include: ../../common/openshift-management/uninstall.yml diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml index dfcef8435..d0deaeb65 100644 --- a/playbooks/common/openshift-checks/adhoc.yml +++ b/playbooks/common/openshift-checks/adhoc.yml @@ -1,12 +1,13 @@ --- -- name: OpenShift health checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: adhoc post_tasks: - - name: Run health checks + - name: Run health checks (adhoc) action: openshift_health_check args: checks: '{{ openshift_checks | default([]) }}' diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml index 21ea785ef..d0921b9d3 100644 --- a/playbooks/common/openshift-checks/health.yml +++ b/playbooks/common/openshift-checks/health.yml @@ -1,11 +1,13 @@ --- -- name: Run OpenShift health checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: health post_tasks: - - action: openshift_health_check + - name: Run health checks (@health) + action: openshift_health_check args: checks: ['@health'] diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml new file mode 100644 index 000000000..93cf6c359 --- /dev/null +++ b/playbooks/common/openshift-checks/install.yml @@ -0,0 +1,51 @@ +--- +- name: Health Check Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Health Check 'In Progress' + run_once: true + set_stats: + data: + installer_phase_health: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: OpenShift Health Checks + hosts: oo_all_hosts + any_errors_fatal: true + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: install + post_tasks: + - name: Run health checks (install) - EL + when: ansible_distribution != "Fedora" + action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - package_availability + - package_version + - docker_image_availability + - docker_storage + + - name: Run health checks (install) - Fedora + when: ansible_distribution == "Fedora" + action: openshift_health_check + args: + checks: + - docker_image_availability + +- name: Health Check Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Health Check 'Complete' + run_once: true + set_stats: + data: + installer_phase_health: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml index 88e6f9120..32449d4e4 100644 --- a/playbooks/common/openshift-checks/pre-install.yml +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -1,11 +1,13 @@ --- -- name: run OpenShift pre-install checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: pre-install post_tasks: - - action: 
openshift_health_check + - name: Run health checks (@preflight) + action: openshift_health_check args: checks: ['@preflight'] diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml index 5ddafdb07..359132dd0 100644 --- a/playbooks/common/openshift-cluster/cockpit-ui.yml +++ b/playbooks/common/openshift-cluster/cockpit-ui.yml @@ -3,4 +3,6 @@ hosts: oo_first_master roles: - role: cockpit-ui - when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) + when: + - openshift_hosted_manage_registry | default(true) | bool + - not openshift.docker.hosted_registry_insecure | default(false) | bool diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index b399ea995..3b4d6f9a6 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,22 +1,5 @@ --- -# TODO: refactor this into its own include -# and pass a variable for ctx -- name: Verify Requirements - hosts: oo_all_hosts - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: install - post_tasks: - - action: openshift_health_check - args: - checks: - - disk_availability - - memory_availability - - package_availability - - package_version - - docker_image_availability - - docker_storage +- include: ../openshift-checks/install.yml - include: ../openshift-etcd/config.yml @@ -44,9 +27,9 @@ when: openshift_logging_install_logging | default(false) | bool - include: service_catalog.yml - when: openshift_enable_service_catalog | default(false) | bool + when: openshift_enable_service_catalog | default(true) | bool -- include: openshift_management.yml +- include: ../openshift-management/config.yml when: openshift_management_install_management | default(false) | bool - name: Print deprecated variable warning message if necessary diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml index be14b06f0..fe765aa5d 100644 --- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -5,9 +5,6 @@ hosts: oo_masters_to_config:oo_nodes_to_config roles: - openshift_facts - post_tasks: - - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1" - when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool - name: Reconfigure masters to listen on our new dns_port hosts: oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index e55b2f964..78b552279 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -51,7 +51,7 @@ when: - g_etcd_hosts | default([]) | length not in [3,1] - not openshift_master_unsupported_embedded_etcd | default(False) - - not openshift_node_bootstrap | default(False) + - not (openshift_node_bootstrap | default(False)) - name: Evaluate oo_all_hosts add_host: diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index be2f8b5f4..91223d368 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -10,6 +10,7 @@ - name: load openshift_facts 
module include_role: name: openshift_facts + static: yes # TODO: Should this role be refactored into health_checks?? - name: Run openshift_sanitize_inventory to set variables @@ -145,7 +146,19 @@ https_proxy: "{{ openshift_https_proxy | default(None) }}" no_proxy: "{{ openshift_no_proxy | default(None) }}" generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" - no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}" + + - name: Set fact of no_proxy_internal_hostnames + openshift_facts: + role: common + local_facts: + no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - name: initialize_facts set_fact repoquery command set_fact: diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index e6400ea61..37a5284d5 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,15 +1,4 @@ --- -# openshift_install_base_package_group may be set in a play variable to limit -# the host groups the base package is installed on. This is currently used -# for master/control-plane upgrades. -- name: Set version_install_base_package true on masters and nodes - hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}" - tasks: - - name: Set version_install_base_package true - set_fact: - version_install_base_package: True - when: version_install_base_package is not defined - # NOTE: requires openshift_facts be run - name: Determine openshift_version to configure on first master hosts: oo_first_master diff --git a/playbooks/common/openshift-cluster/install_docker_gc.yml b/playbooks/common/openshift-cluster/install_docker_gc.yml new file mode 100644 index 000000000..1e3dfee07 --- /dev/null +++ b/playbooks/common/openshift-cluster/install_docker_gc.yml @@ -0,0 +1,7 @@ +--- +- name: Install docker gc + hosts: oo_first_master + gather_facts: false + tasks: + - include_role: + name: openshift_docker_gc diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml index 4b4f19690..62fe0dd60 100644 --- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml +++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml @@ -3,4 +3,4 @@ hosts: oo_first_master roles: - role: openshift_default_storage_class - when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') + when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack') diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 32e5e708a..15ee60dc0 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -1,14 +1,15 @@ --- - name: Hosted Install Checkpoint 
Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Hosted install 'In Progress' + run_once: true set_stats: data: - installer_phase_hosted: "In Progress" - aggregate: false + installer_phase_hosted: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - include: create_persistent_volumes.yml @@ -25,13 +26,19 @@ - include: openshift_prometheus.yml when: openshift_hosted_prometheus_deploy | default(False) | bool +- include: install_docker_gc.yml + when: + - openshift_use_crio | default(False) | bool + - openshift_crio_enable_docker_gc | default(False) | bool + - name: Hosted Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Hosted install 'Complete' + run_once: true set_stats: data: - installer_phase_hosted: "Complete" - aggregate: false + installer_phase_hosted: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml index 69f50fbcd..bc59bd95a 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/common/openshift-cluster/openshift_logging.yml @@ -1,14 +1,15 @@ --- - name: Logging Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Logging install 'In Progress' + run_once: true set_stats: data: - installer_phase_logging: "In Progress" - aggregate: false + installer_phase_logging: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: OpenShift Aggregated Logging hosts: oo_first_master @@ -24,12 +25,13 @@ tasks_from: update_master_config - name: Logging Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Logging install 'Complete' + run_once: true set_stats: data: - installer_phase_logging: "Complete" - aggregate: false + installer_phase_logging: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_management.yml b/playbooks/common/openshift-cluster/openshift_management.yml deleted file mode 100644 index 6e582920b..000000000 --- a/playbooks/common/openshift-cluster/openshift_management.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Management Install Checkpoint Start - hosts: localhost - connection: local - gather_facts: false - tasks: - - name: Set Management install 'In Progress' - set_stats: - data: - installer_phase_Management: "In Progress" - aggregate: false - -- name: Management - include: ../openshift-management/config.yml - -- name: Management Install Checkpoint End - hosts: localhost - connection: local - gather_facts: false - tasks: - - name: Set Management install 'Complete' - set_stats: - data: - installer_phase_Management: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml index e369dcd86..80cd93e5f 100644 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -1,14 +1,15 @@ --- - name: Metrics Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Metrics install 'In Progress' + run_once: true set_stats: data: - installer_phase_metrics: "In Progress" - aggregate: false + installer_phase_metrics: 
+ status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: OpenShift Metrics hosts: oo_first_master @@ -25,12 +26,13 @@ tasks_from: update_master_config.yaml - name: Metrics Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Metrics install 'Complete' + run_once: true set_stats: data: - installer_phase_metrics: "Complete" - aggregate: false + installer_phase_metrics: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml index ac2d250a3..7aa9a16e6 100644 --- a/playbooks/common/openshift-cluster/openshift_prometheus.yml +++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml @@ -1,5 +1,29 @@ --- +- name: Prometheus Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Prometheus install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_prometheus: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + - name: Create Hosted Resources - openshift_prometheus hosts: oo_first_master roles: - role: openshift_prometheus + +- name: Prometheus Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Prometheus install 'Complete' + run_once: true + set_stats: + data: + installer_phase_prometheus: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 2068ed199..eb225dfb5 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -1,11 +1,4 @@ --- -- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2 - hosts: oo_first_master - tasks: - - fail: - msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles." - when: not openshift.common.version_gte_3_2_or_1_2 | bool - - name: Check cert expirys hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config vars: @@ -43,11 +36,6 @@ when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt' - modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: servingInfo.clientCA - yaml_value: ca.crt - when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: etcdClientInfo.ca yaml_value: ca-bundle.crt when: @@ -67,6 +55,13 @@ when: - groups.oo_etcd_to_config | default([]) | length == 0 - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt' + # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate. 
+ # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: client-ca-bundle.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt' - name: Copy current OpenShift CA to legacy directory hosts: oo_masters_to_config @@ -155,6 +150,7 @@ - ca.key - ca-bundle.crt - ca.serial.txt + - client-ca-bundle.crt delegate_to: "{{ openshift_ca_host }}" run_once: true changed_when: false @@ -173,6 +169,7 @@ - ca.key - ca-bundle.crt - ca.serial.txt + - client-ca-bundle.crt - name: Update master client kubeconfig CA data kubeclient_ca: client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml index afd5463b2..7e9363c5f 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml @@ -70,9 +70,7 @@ --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" --cert={{ openshift.common.config_base }}/master/registry.crt --key={{ openshift.common.config_base }}/master/registry.key - {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %} --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }} - {% endif %} - name: Update registry certificates secret oc_secret: diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml index 95a8f601c..7bb8511f6 100644 --- a/playbooks/common/openshift-cluster/service_catalog.yml +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -1,14 +1,15 @@ --- - name: Service Catalog Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Service Catalog install 'In Progress' + run_once: true set_stats: data: - installer_phase_servicecatalog: "In Progress" - aggregate: false + installer_phase_servicecatalog: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Service Catalog hosts: oo_first_master @@ -20,12 +21,13 @@ first_master: "{{ groups.oo_first_master[0] }}" - name: Service Catalog Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Service Catalog install 'Complete' + run_once: true set_stats: data: - installer_phase_servicecatalog: "Complete" - aggregate: false + installer_phase_servicecatalog: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 090ad6445..fe376fe31 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -1,16 +1,17 @@ --- - name: Initialization Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false roles: - installer_checkpoint tasks: - name: Set install initialization 'In Progress' + run_once: true set_stats: data: - installer_phase_initialize: "In Progress" - aggregate: false + installer_phase_initialize: + status: "In 
Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - include: evaluate_groups.yml tags: @@ -37,12 +38,13 @@ - always - name: Initialization Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set install initialization 'Complete' + run_once: true set_stats: data: - installer_phase_initialize: "Complete" - aggregate: false + installer_phase_initialize: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml new file mode 100644 index 000000000..9c9c260fb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: Role +metadata: + name: shared-resource-viewer + namespace: openshift +rules: +- apiGroups: + - "" + - template.openshift.io + attributeRestrictions: null + resources: + - templates + verbs: + - get + - list + - watch +- apiGroups: + - "" + - image.openshift.io + attributeRestrictions: null + resources: + - imagestreamimages + - imagestreams + - imagestreamtags + verbs: + - get + - list + - watch +- apiGroups: + - "" + - image.openshift.io + attributeRestrictions: null + resources: + - imagestreams/layers + verbs: + - get diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 2826951e6..6ad0b6b86 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -9,7 +9,12 @@ - name: Ensure firewall is not switched during upgrade hosts: oo_all_hosts + vars: + openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}" tasks: + - name: set currently installed version + set_fact: + openshift_currently_installed_version: "{{ openshift_master_installed_version }}" - name: Check if iptables is running command: systemctl status iptables changed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index 72de63070..fc1cbf32a 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -30,6 +30,7 @@ ansible_become: "{{ g_sudo | default(omit) }}" with_items: " {{ groups['oo_nodes_to_config'] }}" when: + - hostvars[item].openshift is defined - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list changed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 07e521a89..c634e0ab8 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -85,6 +85,8 @@ roles: - openshift_manageiq + - role: openshift_project_request_template + when: openshift_project_request_template_manage # Create the new templates shipped in 3.2, existing templates are left # unmodified. 
This prevents the subsequent role definition for # openshift_examples from failing when trying to replace templates that do @@ -103,14 +105,20 @@ openshift_hosted_templates_import_command: replace # Check for warnings to be printed at the end of the upgrade: -- name: Check for warnings +- name: Clean up and display warnings hosts: oo_masters_to_config - tasks: + tags: + - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + post_tasks: # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond: - name: grep pluginOrderOverride command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml register: grep_plugin_order_override - when: openshift.common.version_gte_3_3_or_1_3 | bool changed_when: false failed_when: false @@ -121,12 +129,8 @@ - not grep_plugin_order_override | skipped - grep_plugin_order_override.rc == 0 -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config - tags: - - always - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + - name: Warn if shared-resource-viewer could not be updated + debug: + msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213" + when: + - __shared_resource_viewer_protected | default(false) diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml index 45022cd61..6a5bc24f7 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml @@ -9,16 +9,29 @@ local_facts: ha: "{{ groups.oo_masters_to_config | length > 1 }}" - - name: Ensure HA Master is running - service: - name: "{{ openshift.common.service_type }}-master-api" - state: started - enabled: yes - when: openshift.common.is_containerized | bool + - when: openshift.common.is_containerized | bool + block: + - set_fact: + master_services: + - "{{ openshift.common.service_type }}-master" - - name: Ensure HA Master is running - service: - name: "{{ openshift.common.service_type }}-master-controllers" - state: started - enabled: yes - when: openshift.common.is_containerized | bool + # In case of the non-ha to ha upgrade. 
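# A hedged sketch of the detection introduced below, with 'origin' assumed as
# the service type: if systemctl reports the split API unit, the upgrade
# manages the api/controllers pair instead of the single combined service.
- name: Check whether the split master API unit exists (sketch)
  command: systemctl list-units origin-master-api.service --no-legend
  register: origin_master_api_status
  changed_when: false
  failed_when: false

- set_fact:
    master_services:
      - origin-master-api
      - origin-master-controllers
  when: origin_master_api_status.stdout_lines | length > 0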
+ - name: Check if the {{ openshift.common.service_type }}-master-api.service exists + command: > + systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend + register: master_api_service_status + + - set_fact: + master_services: + - "{{ openshift.common.service_type }}-master-api" + - "{{ openshift.common.service_type }}-master-controllers" + when: + - master_api_service_status.stdout_lines | length > 0 + - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0] + + - name: Ensure Master is running + service: + name: "{{ item }}" + state: started + enabled: yes + with_items: "{{ master_services }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml index ad6325ca0..2a8de50a2 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml @@ -1,12 +1,14 @@ --- -- name: Verify Host Requirements +- name: OpenShift Health Checks hosts: oo_all_hosts + any_errors_fatal: true roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: upgrade post_tasks: - - action: openshift_health_check + - name: Run health checks (upgrade) + action: openshift_health_check args: checks: - disk_availability diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 142ce5f3d..13fa37b09 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -4,6 +4,12 @@ msg: Verify OpenShift is already installed when: openshift.common.version is not defined +- name: Update oreg_auth docker login credentials if necessary + include_role: + name: docker + tasks_from: registry_auth.yml + when: oreg_auth_user is defined + - name: Verify containers are available for upgrade command: > docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} @@ -37,7 +43,7 @@ fail: msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required" when: - - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<') + - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<') - name: Fail when openshift version does not meet minium requirement for Origin upgrade fail: diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml deleted file mode 100644 index 8cc46ab68..000000000 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# When we update package "a-${version}" and a requires b >= ${version} if we -# don't specify the version of b yum will choose the latest version of b -# available and the whole set of dependencies end up at the latest version. -# Since the package module, unlike the yum module, doesn't flatten a list -# of packages into one transaction we need to do that explicitly. The ansible -# core team tells us not to rely on yum module transaction flattening anyway. 
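# A concrete illustration of the transaction flattening described above
# (package names here are placeholders, not this playbook's variables):
# joining the list into one comma-separated name makes the package module
# issue a single yum transaction.
- name: Upgrade master packages in one transaction (illustrative only)
  package:
    name: "{{ ['origin-master', 'origin-node', 'origin-clients'] | join(',') }}"
    state: present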
- -# TODO: If the sdn package isn't already installed this will install it, we -# should fix that -- name: Upgrade master packages - package: name={{ master_pkgs | join(',') }} state=present - vars: - master_pkgs: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - PyYAML - when: - - component == "master" - - not openshift.common.is_atomic | bool - -- name: Upgrade node packages - package: name={{ node_pkgs | join(',') }} state=present - vars: - node_pkgs: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - PyYAML - when: - - component == "node" - - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index da47491c1..399b818a7 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,22 +3,6 @@ # Upgrade Masters ############################################################################### -# oc adm migrate storage should be run prior to etcd v3 upgrade -# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 -- name: Pre master upgrade - Upgrade all storage - hosts: oo_first_master - tasks: - - name: Upgrade all storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=* --confirm - register: l_pb_upgrade_control_plane_pre_upgrade_storage - when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - failed_when: - - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 - - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool - # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. This causes problems for the etcd data dir fact detection # so we must first make sure this is set correctly before attempting the backup. 
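# For context, a minimal sketch (the data-dir paths are assumptions, not taken
# from this diff) of how the embedded-etcd fact feeds the data-dir detection
# that the backup depends on:
- name: Record whether etcd is embedded in the master (sketch)
  openshift_facts:
    role: master
    local_facts:
      embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"

- name: Derive the etcd data dir from that fact (assumed paths)
  set_fact:
    etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd'
                       if openshift.master.embedded_etcd | bool
                       else '/var/lib/etcd' }}"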
@@ -31,7 +15,6 @@ role: master local_facts: embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}" - name: Upgrade and backup etcd include: ./etcd/main.yml @@ -49,6 +32,22 @@ - include: create_service_signer_cert.yml +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade all storage + hosts: oo_first_master + tasks: + - name: Upgrade all storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=* --confirm + register: l_pb_upgrade_control_plane_pre_upgrade_storage + when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 + - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool + # Set openshift_master_facts separately. In order to reconcile # admission_config's, we currently must run openshift_master_facts and # then run openshift_facts. @@ -64,13 +63,9 @@ vars: openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 - handlers: - - include: ../../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - - lib_utils - post_tasks: + tasks: + - include_role: + name: openshift_facts # Run the pre-upgrade hook if defined: - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}" @@ -79,52 +74,9 @@ - include: "{{ openshift_master_upgrade_pre_hook }}" when: openshift_master_upgrade_pre_hook is defined - - include: rpm_upgrade.yml component=master - when: not openshift.common.is_containerized | bool - - - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml - - - include: upgrade_scheduler.yml - - - include: "{{ master_config_hook }}" - when: master_config_hook is defined - - - include_vars: ../../../../roles/openshift_master/vars/main.yml - - - name: Remove any legacy systemd units and update systemd units - include: ../../../../roles/openshift_master/tasks/systemd_units.yml - - - name: Check for ca-bundle.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - register: ca_bundle_stat - failed_when: false - - - name: Check for ca.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca.crt" - register: ca_crt_stat - failed_when: false - - - name: Migrate ca.crt to ca-bundle.crt - command: mv ca.crt ca-bundle.crt - args: - chdir: "{{ openshift.common.config_base }}/master" - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Link ca.crt to ca-bundle.crt - file: - src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - path: "{{ openshift.common.config_base }}/master/ca.crt" - state: link - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Update oreg value - yedit: - src: "{{ openshift.common.config_base }}/master/master-config.yaml" - key: 'imageConfig.format' - value: "{{ oreg_url | default(oreg_url_master) }}" - when: oreg_url is defined or oreg_url_master is defined + - include_role: + name: openshift_master + tasks_from: upgrade.yml # Run the upgrade hook prior to restarting services/system if defined: - debug: msg="Running master upgrade hook {{ 
openshift_master_upgrade_hook }}" @@ -151,7 +103,9 @@ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=clusterpolicies --confirm register: l_pb_upgrade_control_plane_post_upgrade_storage - when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool + when: + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool + - openshift_version | version_compare('3.7','<') failed_when: - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 @@ -193,13 +147,14 @@ # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe # restart. skip_docker_role: True + __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml" tasks: - name: Reconcile Cluster Roles command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --additive-only=true --confirm -o name register: reconcile_cluster_role_result - when: not openshift.common.version_gte_3_7 | bool + when: openshift_version | version_compare('3.7','<') changed_when: - reconcile_cluster_role_result.stdout != '' - reconcile_cluster_role_result.rc == 0 @@ -214,7 +169,7 @@ --exclude-groups=system:unauthenticated --exclude-users=system:anonymous --additive-only=true --confirm -o name - when: not openshift.common.version_gte_3_7 | bool + when: openshift_version | version_compare('3.7','<') register: reconcile_bindings_result changed_when: - reconcile_bindings_result.stdout != '' @@ -229,7 +184,51 @@ changed_when: - reconcile_jenkins_role_binding_result.stdout != '' - reconcile_jenkins_role_binding_result.rc == 0 - when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool) + when: + - openshift_version | version_compare('3.7','<') + + - when: openshift_upgrade_target | version_compare('3.7','<') + block: + - name: Retrieve shared-resource-viewer + oc_obj: + state: list + kind: role + name: "shared-resource-viewer" + namespace: "openshift" + register: objout + + - name: Determine if shared-resource-viewer is protected + set_fact: + __shared_resource_viewer_protected: true + when: + - "'results' in objout" + - "'results' in objout['results']" + - "'annotations' in objout['results']['results'][0]['metadata']" + - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']" + - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'" + - copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + with_items: + - "{{ __master_shared_resource_viewer_file }}" + when: __shared_resource_viewer_protected is not defined + + - name: Fixup shared-resource-viewer role + oc_obj: + state: present + kind: role + name: "shared-resource-viewer" + namespace: "openshift" + files: + - "/tmp/{{ __master_shared_resource_viewer_file }}" + delete_after: true + when: __shared_resource_viewer_protected is not defined + register: result + retries: 3 + delay: 5 + until: result.rc == 0 + ignore_errors: true + - name: Reconcile Security Context Constraints command: > diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml deleted file mode 100644 index 8558bf3e9..000000000 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ 
/dev/null @@ -1,173 +0,0 @@ ---- -# Upgrade predicates -- vars: - prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" - prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}" - default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}" - # older_predicates are the set of predicates that have previously been - # hard-coded into openshift_facts - older_predicates: - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: MaxEBSVolumeCount - - name: MaxGCEPDVolumeCount - - name: Region - argument: - serviceAffinity: - labels: - - region - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: Region - argument: - serviceAffinity: - labels: - - region - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: Region - argument: - serviceAffinity: - labels: - - region - # older_predicates_no_region are the set of predicates that have previously - # been hard-coded into openshift_facts, with the Region predicate removed - older_predicates_no_region: - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: MaxEBSVolumeCount - - name: MaxGCEPDVolumeCount - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - block: - - # Handle case where openshift_master_predicates is defined - - block: - - debug: - msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}" - when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] - - - debug: - msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" - when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates - when: openshift_master_scheduler_predicates | default(none) is not none - - # Handle cases where openshift_master_predicates is not defined - - block: - - debug: - msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}" - when: - - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates - - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] - - - set_fact: - openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}" - when: - - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates - - openshift_master_scheduler_current_predicates in 
older_predicates + [prev_predicates] - - - set_fact: - openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}" - when: - - openshift_master_scheduler_current_predicates != default_predicates_no_region - - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] - - when: openshift_master_scheduler_predicates | default(none) is none - - -# Upgrade priorities -- vars: - prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" - prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}" - default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}" - # older_priorities are the set of priorities that have previously been - # hard-coded into openshift_facts - older_priorities: - - - name: LeastRequestedPriority - weight: 1 - - name: SelectorSpreadPriority - weight: 1 - - name: Zone - weight: 2 - argument: - serviceAntiAffinity: - label: zone - # older_priorities_no_region are the set of priorities that have previously - # been hard-coded into openshift_facts, with the Zone priority removed - older_priorities_no_zone: - - - name: LeastRequestedPriority - weight: 1 - - name: SelectorSpreadPriority - weight: 1 - block: - - # Handle case where openshift_master_priorities is defined - - block: - - debug: - msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}" - when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] - - - debug: - msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" - when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities - when: openshift_master_scheduler_priorities | default(none) is not none - - # Handle cases where openshift_master_priorities is not defined - - block: - - debug: - msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}" - when: - - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities - - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] - - - set_fact: - openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}" - when: - - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities - - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] - - - set_fact: - openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}" - when: - - openshift_master_scheduler_current_priorities != default_priorities_no_zone - - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] - - when: openshift_master_scheduler_priorities | default(none) is none - - -# Update scheduler -- vars: - scheduler_config: - kind: Policy - 
apiVersion: v1 - predicates: "{{ openshift_upgrade_scheduler_predicates - | default(openshift_master_scheduler_current_predicates) }}" - priorities: "{{ openshift_upgrade_scheduler_priorities - | default(openshift_master_scheduler_current_priorities) }}" - block: - - name: Update scheduler config - copy: - content: "{{ scheduler_config | to_nice_json }}" - dest: "{{ openshift_master_scheduler_conf }}" - backup: true - when: > - openshift_upgrade_scheduler_predicates is defined or - openshift_upgrade_scheduler_priorities is defined diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml deleted file mode 100644 index d69472fad..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst' - yaml_value: 400 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps' - yaml_value: 200 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst' - yaml_value: 600 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps' - yaml_value: 300 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.servicesServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile' - yaml_value: service-signer.key - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git 
a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml deleted file mode 100644 index 89b524f14..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.burst' - yaml_value: 40 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.qps' - yaml_value: 20 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml deleted file mode 100644 index a241ef039..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml +++ /dev/null @@ -1,118 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
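# A minimal sketch of the kind of task such a cleanup step typically runs
# (illustrative only; the referenced ../cleanup_unused_images.yml is not part
# of this diff):
- name: Remove dangling Docker images (illustrative only)
  shell: docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi
  failed_when: false
  changed_when: false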
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml deleted file mode 100644 index f64f0e003..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ /dev/null @@ -1,119 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml deleted file mode 100644 index cee4e9087..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ /dev/null @@ -1,113 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. 
At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml deleted file mode 100644 index ed89dbe8d..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml deleted file mode 100644 index ae217ba2e..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml +++ /dev/null @@ -1,116 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../upgrade_nodes.yml - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml deleted file mode 100644 index 43da5b629..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ /dev/null @@ -1,119 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml deleted file mode 100644 index 8531e6045..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ /dev/null @@ -1,111 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. 
At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml deleted file mode 100644 index ed89dbe8d..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml deleted file mode 100644 index ae63c9ca9..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -############################################################################### -# Pre upgrade checks for known data problems, if this playbook fails you should -# contact support. If you're not supported contact users@lists.openshift.com -# -# oc_objectvalidator provides these two checks -# 1 - SDN Data issues, never seen in the wild but known possible due to code audits -# https://github.com/openshift/origin/issues/12697 -# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934 -# -############################################################################### -- name: Verify 3.5 specific upgrade checks - hosts: oo_first_master - roles: - - { role: lib_openshift } - tasks: - - name: Check for invalid namespaces and SDN errors - oc_objectvalidator: - - # What's all this PetSet business about? - # - # 'PetSets' were ALPHA resources in Kube <= 3.4. In >= 3.5 they are - # no longer supported. The BETA resource 'StatefulSets' replaces - # them. We can't migrate clients PetSets to - # StatefulSets. 
Additionally, Red Hat has never officially supported - # these resource types. Sorry users, but if you were using - # unsupported resources from the Kube documentation then we can't - # help you at this time. - # - # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229 - - name: Check if legacy PetSets exist - oc_obj: - state: list - all_namespaces: true - kind: petsets - register: l_do_petsets_exist - - - name: Fail on unsupported resource migration 'PetSets' - fail: - msg: > - PetSet objects were detected in your cluster. These are an - Alpha feature in upstream Kubernetes 1.4 and are not supported - by Red Hat. In Kubernetes 1.5, they are replaced by the Beta - feature StatefulSets. Red Hat currently does not offer support - for either PetSets or StatefulSets. - - Automatically migrating PetSets to StatefulSets in OpenShift - Container Platform (OCP) 3.5 is not supported. See the - Kubernetes "Upgrading from PetSets to StatefulSets" - documentation for additional information: - - https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/ - - PetSets MUST be removed before upgrading to OCP 3.5. Red Hat - strongly recommends reading the above referenced documentation - in its entirety before taking any destructive actions. - - If you want to simply remove all PetSets without manually - migrating to StatefulSets, run this command as a user with - cluster-admin privileges: - - $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false - when: - # Search did not fail, valid resource type found - - l_do_petsets_exist.results.returncode == 0 - # Items do exist in the search results - - l_do_petsets_exist.results.results.0['items'] | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml deleted file mode 100644 index ed89dbe8d..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 920dc2ffc..dd109cfa9 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -116,6 +116,8 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_6/master_config_upgrade.yml" - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 27d8515dc..8ab68002d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -76,7 +76,6 @@ # defined, and overriding the normal behavior of protecting the 
installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index f1ca1edb9..f4862e321 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -120,6 +120,22 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index 6c4f9671b..b905d6d86 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -80,7 +80,6 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger @@ -128,4 +127,18 @@ vars: master_config_hook: "v3_7/master_config_upgrade.yml" +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started + - include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index f76fc68d1..74d0cd8ad 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -11,13 +11,15 @@ tasks: - name: Check for invalid namespaces and SDN errors oc_objectvalidator: - + # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO - name: Confirm OpenShift authorization objects are in sync command: > {{ openshift.common.client_binary }} adm migrate authorization - when: not openshift.common.version_gte_3_7 | bool + when: + - openshift_currently_installed_version | version_compare('3.7','<') + - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool 
changed_when: false register: l_oc_result until: l_oc_result.rc == 0 - retries: 4 + retries: 2 delay: 15 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins index 7de3c1dd7..7de3c1dd7 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml index df59a8782..1d4d1919c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml @@ -1,21 +1,20 @@ --- - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" + yaml_key: 'controllerConfig.election.lockName' + yaml_value: 'openshift-master-controllers' - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key - modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.election.lockName' - yaml_value: 'openshift-master-controllers' + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_8/roles index 415645be6..415645be6 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index 30e719d8f..b3162bd5f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -12,8 +12,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade @@ -21,6 +21,10 @@ tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos and initialize facts on all hosts hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config tags: @@ -47,6 +51,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade @@ -112,6 +120,22 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader 
election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index e9cec9220..3df5b17b5 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -21,14 +21,18 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos on control plane hosts hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tags: @@ -55,6 +59,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade @@ -72,7 +80,6 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger @@ -118,6 +125,20 @@ - include: ../upgrade_control_plane.yml vars: - master_config_hook: "v3_5/master_config_upgrade.yml" + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index e29d0f8e6..f3d192ba7 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -14,8 +14,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml @@ -48,6 +48,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../disable_node_excluders.yml tags: - pre_upgrade diff --git 
a/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml new file mode 100644 index 000000000..d8540abfb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.8 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - debug: msg="noop" diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml index 31a0f50d8..eb6b94f33 100644 --- a/playbooks/common/openshift-etcd/certificates.yml +++ b/playbooks/common/openshift-etcd/certificates.yml @@ -1,29 +1,4 @@ --- -- name: Create etcd server certificates for etcd hosts - hosts: oo_etcd_to_config - any_errors_fatal: true - roles: - - role: openshift_etcd_facts - post_tasks: - - include_role: - name: etcd - tasks_from: server_certificates - vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" +- include: server_certificates.yml -- name: Create etcd client certificates for master hosts - hosts: oo_masters_to_config - any_errors_fatal: true - roles: - - role: openshift_etcd_facts - - role: openshift_etcd_client_certificates - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" - etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" - etcd_cert_prefix: "master.etcd-" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config +- include: master_etcd_certificates.yml diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 82539dac8..3fe483785 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -1,14 +1,15 @@ --- - name: etcd Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set etcd install 'In Progress' + run_once: true set_stats: data: - installer_phase_etcd: "In Progress" - aggregate: false + installer_phase_etcd: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - include: ca.yml @@ -27,12 +28,13 @@ - role: nickhammond.logrotate - name: etcd Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set etcd install 'Complete' + run_once: true set_stats: data: - installer_phase_etcd: "Complete" - aggregate: false + installer_phase_etcd: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml new file mode 100644 index 000000000..b16b78c4f --- /dev/null +++ b/playbooks/common/openshift-etcd/embedded2external.yml @@ -0,0 +1,172 @@ +--- +- name: Pre-migrate checks + hosts: localhost + tasks: + # Check there is only one etcd host + - assert: + that: groups.oo_etcd_to_config | default([]) | length == 1 + msg: "[etcd] group must contain only one host" + # Check there is only one master + - assert: + that: groups.oo_masters_to_config | default([]) | length == 1 + msg: "[master] group must contain only one host" 
+ +# 1. stop a master +- name: Prepare masters for etcd data migration + hosts: oo_first_master + roles: + - role: openshift_facts + tasks: + - name: Check the master API is ready + include_role: + name: openshift_master + tasks_from: check_master_api_is_ready + - set_fact: + master_service: "{{ openshift.common.service_type + '-master' }}" + embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + - debug: + msg: "master service name: {{ master_service }}" + - name: Stop master + service: + name: "{{ master_service }}" + state: stopped + # 2. backup embedded etcd + # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285 + - include_role: + name: etcd + tasks_from: backup + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_embedded_etcd: "{{ true }}" + r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}" + + - include_role: + name: etcd + tasks_from: backup.archive + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_embedded_etcd: "{{ true }}" + r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}" + +# 3. deploy certificates (for etcd and master) +- include: ca.yml + +- include: server_certificates.yml + +- name: Backup etcd client certificates for master host + hosts: oo_first_master + tasks: + - include_role: + name: etcd + tasks_from: backup_master_etcd_certificates + +- name: Redeploy master etcd certificates + include: master_etcd_certificates.yml + vars: + etcd_certificates_redeploy: "{{ true }}" + +# 4. deploy external etcd +- include: ../openshift-etcd/config.yml + +# 5. stop external etcd +- name: Cleanse etcd + hosts: oo_etcd_to_config[0] + gather_facts: no + pre_tasks: + - include_role: + name: etcd + tasks_from: disable_etcd + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + - include_role: + name: etcd + tasks_from: clean_data + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + +# 6. 
copy the embedded etcd backup to the external host +# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory +- name: Copy embedded etcd backup to the external host + hosts: localhost + tasks: + - name: Create local temp directory for syncing etcd backup + local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX + register: g_etcd_client_mktemp + changed_when: False + become: no + + - include_role: + name: etcd + tasks_from: backup.fetch + vars: + r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}" + etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_embedded_etcd: "{{ true }}" + r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" + delegate_to: "{{ groups.oo_first_master[0] }}" + + - include_role: + name: etcd + tasks_from: backup.copy + vars: + r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}" + etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" + delegate_to: "{{ groups.oo_etcd_to_config[0] }}" + + - debug: + msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}" + + - name: Delete temporary directory + local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent + changed_when: False + become: no + +# 7. force new cluster from the backup +- name: Force new etcd cluster + hosts: oo_etcd_to_config[0] + tasks: + - include_role: + name: etcd + tasks_from: backup.unarchive + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" + + - include_role: + name: etcd + tasks_from: backup.force_new_cluster + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" + etcd_peer: "{{ openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + +# 8. re-configure master to use the external etcd +- name: Configure master to use external etcd + hosts: oo_first_master + tasks: + - include_role: + name: openshift_master + tasks_from: configure_external_etcd + vars: + etcd_peer_url_scheme: "https" + etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}" + etcd_peer_port: 2379 + + # 9. 
start the master + - name: Start master + service: + name: "{{ master_service }}" + state: started + register: service_status + until: service_status.state is defined and service_status.state == "started" + retries: 5 + delay: 10 diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml new file mode 100644 index 000000000..0a25aac57 --- /dev/null +++ b/playbooks/common/openshift-etcd/master_etcd_certificates.yml @@ -0,0 +1,14 @@ +--- +- name: Create etcd client certificates for master hosts + hosts: oo_masters_to_config + any_errors_fatal: true + roles: + - role: openshift_etcd_facts + - role: openshift_etcd_client_certificates + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" + etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" + etcd_cert_prefix: "master.etcd-" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml index 2456ad3a8..31362f2f6 100644 --- a/playbooks/common/openshift-etcd/migrate.yml +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -1,4 +1,17 @@ --- +- name: Check if the master has embedded etcd + hosts: localhost + connection: local + become: no + gather_facts: no + tags: + - always + tasks: + - fail: + msg: "Migration of an embedded etcd is not supported. Please, migrate the embedded etcd into an external etcd first." + when: + - groups.oo_etcd_to_config | default([]) | length == 0 + - name: Run pre-checks hosts: oo_etcd_to_migrate tasks: @@ -60,12 +73,11 @@ hosts: oo_etcd_to_migrate gather_facts: no pre_tasks: - - set_fact: - l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}" - - name: Disable etcd members - service: - name: "{{ l_etcd_service }}" - state: stopped + - include_role: + name: etcd + tasks_from: disable_etcd + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - name: Migrate data on first etcd hosts: oo_etcd_to_migrate[0] diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml index b5ba2bbba..20061366c 100644 --- a/playbooks/common/openshift-etcd/scaleup.yml +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -46,7 +46,7 @@ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_initial_cluster_state: "existing" - initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}" + etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}" etcd_ca_setup: False r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - role: nickhammond.logrotate @@ -71,7 +71,7 @@ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'])) + | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) )) | oo_collect('openshift.common.hostname') | default(none, true) }}" openshift_master_etcd_port: "{{ (etcd_client_port | 
default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml new file mode 100644 index 000000000..10e06747b --- /dev/null +++ b/playbooks/common/openshift-etcd/server_certificates.yml @@ -0,0 +1,15 @@ +--- +- name: Create etcd server certificates for etcd hosts + hosts: oo_etcd_to_config + any_errors_fatal: true + roles: + - role: openshift_etcd_facts + post_tasks: + - include_role: + name: etcd + tasks_from: server_certificates + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 516618de2..19e14ab3e 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -1,14 +1,15 @@ --- - name: GlusterFS Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set GlusterFS install 'In Progress' + run_once: true set_stats: data: - installer_phase_glusterfs: "In Progress" - aggregate: false + installer_phase_glusterfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Open firewall ports for GlusterFS nodes hosts: glusterfs @@ -18,6 +19,11 @@ tasks_from: firewall.yml when: - openshift_storage_glusterfs_is_native | default(True) | bool + - include_role: + name: openshift_storage_glusterfs + tasks_from: kernel_modules.yml + when: + - openshift_storage_glusterfs_is_native | default(True) | bool - name: Open firewall ports for GlusterFS registry nodes hosts: glusterfs_registry @@ -27,6 +33,11 @@ tasks_from: firewall.yml when: - openshift_storage_glusterfs_registry_is_native | default(True) | bool + - include_role: + name: openshift_storage_glusterfs + tasks_from: kernel_modules.yml + when: + - openshift_storage_glusterfs_registry_is_native | default(True) | bool - name: Configure GlusterFS hosts: oo_first_master @@ -37,12 +48,13 @@ when: groups.oo_glusterfs_to_config | default([]) | count > 0 - name: GlusterFS Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set GlusterFS install 'Complete' + run_once: true set_stats: data: - installer_phase_glusterfs: "Complete" - aggregate: false + installer_phase_glusterfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml index ecbb092bc..d737b836b 100644 --- a/playbooks/common/openshift-loadbalancer/config.yml +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -1,14 +1,24 @@ --- - name: Load Balancer Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set load balancer install 'In Progress' + run_once: true set_stats: data: - installer_phase_loadbalancer: "In Progress" - aggregate: false + installer_phase_loadbalancer: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: Configure firewall and docker for load balancers + hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config + vars: + 
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" + roles: + - role: os_firewall + - role: openshift_docker + when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool - name: Configure load balancers hosts: oo_lb_to_config @@ -25,16 +35,17 @@ + openshift_loadbalancer_additional_backends | default([]) }}" openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" roles: - - role: os_firewall - role: openshift_loadbalancer + - role: tuned - name: Load Balancer Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set load balancer install 'Complete' + run_once: true set_stats: data: - installer_phase_loadbalancer: "Complete" - aggregate: false + installer_phase_loadbalancer: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml new file mode 100644 index 000000000..facb3a5b9 --- /dev/null +++ b/playbooks/common/openshift-management/add_container_provider.yml @@ -0,0 +1,8 @@ +--- +- name: Add Container Provider to Management + hosts: oo_first_master + tasks: + - name: Run the Management Integration Tasks + include_role: + name: openshift_management + tasks_from: add_container_provider diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml index 0aaafe440..3f1cdf713 100644 --- a/playbooks/common/openshift-management/config.yml +++ b/playbooks/common/openshift-management/config.yml @@ -1,4 +1,16 @@ --- +- name: Management Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Management install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_management: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + - name: Setup CFME hosts: oo_first_master pre_tasks: @@ -13,3 +25,15 @@ name: openshift_management vars: template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}" + +- name: Management Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Management install 'Complete' + run_once: true + set_stats: + data: + installer_phase_management: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml index 698d93405..9f35cc276 100644 --- a/playbooks/common/openshift-management/uninstall.yml +++ b/playbooks/common/openshift-management/uninstall.yml @@ -1,6 +1,6 @@ --- - name: Uninstall CFME - hosts: masters + hosts: masters[0] tasks: - name: Run the CFME Uninstall Role Tasks include_role: diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml index ee76e2ed7..32f638d42 100644 --- a/playbooks/common/openshift-master/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml @@ -1,14 +1,15 @@ --- - name: Master Additional Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Master Additional install 'In Progress' + run_once: true set_stats: data: - installer_phase_master_additional: "In Progress" - aggregate: false + installer_phase_master_additional: + status: "In Progress" + start: "{{ 
lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Additional master configuration hosts: oo_first_master @@ -20,16 +21,18 @@ roles: - role: openshift_master_cluster when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" + - role: openshift_project_request_template + when: openshift_project_request_template_manage - role: openshift_examples when: openshift_install_examples | default(true, true) | bool registry_url: "{{ openshift.master.registry_url }}" - role: openshift_hosted_templates registry_url: "{{ openshift.master.registry_url }}" - role: openshift_manageiq - when: openshift_use_manageiq | default(false) | bool + when: openshift_use_manageiq | default(true) | bool - role: cockpit when: - - openshift.common.is_atomic + - not openshift.common.is_atomic | bool - deployment_type == 'openshift-enterprise' - osm_use_cockpit is undefined or osm_use_cockpit | bool - openshift.common.deployment_subtype != 'registry' @@ -37,12 +40,13 @@ when: openshift_use_flannel | default(false) | bool - name: Master Additional Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Master Additional install 'Complete' + run_once: true set_stats: data: - installer_phase_master_additional: "Complete" - aggregate: false + installer_phase_master_additional: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index bc1fee982..6b0fd6b7c 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -1,14 +1,15 @@ --- - name: Master Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Master install 'In Progress' + run_once: true set_stats: data: - installer_phase_master: "In Progress" - aggregate: false + installer_phase_master: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - include: certificates.yml @@ -198,6 +199,7 @@ openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" + - role: tuned - role: nuage_ca when: openshift_use_nuage | default(false) | bool - role: nuage_common @@ -206,6 +208,18 @@ when: openshift_use_nuage | default(false) | bool - role: calico_master when: openshift_use_calico | default(false) | bool + tasks: + - include_role: + name: kuryr + tasks_from: master + when: openshift_use_kuryr | default(false) | bool + + - name: Setup the node group config maps + include_role: + name: openshift_node_group + when: openshift_master_bootstrap_enabled | default(false) | bool + run_once: True + post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} @@ -226,12 +240,13 @@ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Master Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Master install 'Complete' + run_once: true set_stats: data: - installer_phase_master: "Complete" - aggregate: false + installer_phase_master: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') 
}}" diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js deleted file mode 100644 index d0a9f11dc..000000000 --- a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js +++ /dev/null @@ -1,2 +0,0 @@ -// empty file so that the master-config can still point to a file that exists -// this file will be replaced by the template service broker role if enabled diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml index 4f8b758fd..4e1b3a3be 100644 --- a/playbooks/common/openshift-master/restart_services.yml +++ b/playbooks/common/openshift-master/restart_services.yml @@ -1,22 +1,4 @@ --- -- name: Restart master API - service: - name: "{{ openshift.common.service_type }}-master-api" - state: restarted - when: openshift_master_ha | bool -- name: Wait for master API to come back online - wait_for: - host: "{{ openshift.common.hostname }}" - state: started - delay: 10 - port: "{{ openshift.master.api_port }}" - timeout: 600 - when: openshift_master_ha | bool -- name: Restart master controllers - service: - name: "{{ openshift.common.service_type }}-master-controllers" - state: restarted - # Ignore errrors since it is possible that type != simple for - # pre-3.1.1 installations. - ignore_errors: true - when: openshift_master_ha | bool +- include_role: + name: openshift_master + tasks_from: restart.yml diff --git a/playbooks/common/openshift-master/revert-client-ca.yml b/playbooks/common/openshift-master/revert-client-ca.yml new file mode 100644 index 000000000..9ae23bf5b --- /dev/null +++ b/playbooks/common/openshift-master/revert-client-ca.yml @@ -0,0 +1,17 @@ +--- +- name: Set servingInfo.clientCA = ca.crt in master config + hosts: oo_masters_to_config + tasks: + - name: Read master config + slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_output + + # servingInfo.clientCA may be set as the client-ca-bundle.crt from + # CA redeployment and this task reverts that change. + - name: Set servingInfo.clientCA = ca.crt in master config + modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index f4dc9df8a..4c415ebce 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -22,16 +22,17 @@ - name: restart master api service: name={{ openshift.common.service_type }}-master-controllers state=restarted notify: verify api server + # We retry the controllers because the API may not be 100% initialized yet. 
- name: restart master controllers - service: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 - name: verify api server command: > curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} {{ openshift.master.api_url }}/healthz/ready args: # Disables the following warning: diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml index 560eea785..97acc5d5d 100644 --- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml +++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml @@ -136,9 +136,15 @@ when: - not front_proxy_kubeconfig.stat.exists -- name: copy tech preview extension file for service console UI - copy: - src: openshift-ansible-catalog-console.js +- name: Delete temp directory + file: + name: "{{ certtemp.stdout }}" + state: absent + changed_when: False + +- name: Setup extension file for service console UI + template: + src: ../templates/openshift-ansible-catalog-console.js dest: /etc/origin/master/openshift-ansible-catalog-console.js - name: Update master config @@ -179,8 +185,13 @@ - yedit_output.changed - openshift.master.cluster_method == 'native' +# We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 when: - yedit_output.changed - openshift.master.cluster_method == 'native' @@ -190,11 +201,7 @@ # wait_for port doesn't provide health information. 
command: > curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} {{ openshift.master.api_url }}/healthz/ready args: # Disables the following warning: @@ -207,9 +214,3 @@ changed_when: false when: - yedit_output.changed - -- name: Delete temp directory - file: - name: "{{ certtemp.stdout }}" - state: absent - changed_when: False diff --git a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js new file mode 100644 index 000000000..fd02325ba --- /dev/null +++ b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js @@ -0,0 +1 @@ +window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }}; diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml index 66303d6f7..6ea77e00b 100644 --- a/playbooks/common/openshift-nfs/config.yml +++ b/playbooks/common/openshift-nfs/config.yml @@ -1,14 +1,15 @@ --- - name: NFS Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set NFS install 'In Progress' + run_once: true set_stats: data: - installer_phase_nfs: "In Progress" - aggregate: false + installer_phase_nfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Configure nfs hosts: oo_nfs_to_config @@ -17,12 +18,13 @@ - role: openshift_storage_nfs - name: NFS Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set NFS install 'Complete' + run_once: true set_stats: data: - installer_phase_nfs: "Complete" - aggregate: false + installer_phase_nfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml index fe51ef833..ac757397b 100644 --- a/playbooks/common/openshift-node/additional_config.yml +++ b/playbooks/common/openshift-node/additional_config.yml @@ -19,10 +19,14 @@ - group_by: key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }} changed_when: False + # Create group for kuryr nodes + - group_by: + key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }} + changed_when: False - include: etcd_client_config.yml vars: - openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv" + openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr" - name: Additional node config hosts: oo_nodes_use_flannel @@ -50,3 +54,11 @@ - role: contiv contiv_role: netplugin when: openshift_use_contiv | default(false) | bool + +- name: Configure Kuryr node + hosts: oo_nodes_use_kuryr + tasks: + - include_role: + name: kuryr + tasks_from: node + when: openshift_use_kuryr | default(false) | bool diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml new file mode 100644 index 000000000..38753d0af --- /dev/null +++ b/playbooks/common/openshift-node/clean_image.yml @@ -0,0 +1,10 @@ +--- +- name: Configure nodes + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + tasks: + - name: Remove 
any ansible facts created during AMI creation + file: + path: "/etc/ansible/facts.d/{{ item }}" + state: absent + with_items: + - openshift.fact diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 700aab48c..28e3c1b1b 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -1,14 +1,15 @@ --- - name: Node Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Node install 'In Progress' + run_once: true set_stats: data: - installer_phase_node: "In Progress" - aggregate: false + installer_phase_node: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - include: certificates.yml @@ -25,12 +26,13 @@ - include: enable_excluders.yml - name: Node Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Node install 'Complete' + run_once: true set_stats: data: - installer_phase_node: "Complete" - aggregate: false + installer_phase_node: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml index c96e4921c..17259422d 100644 --- a/playbooks/common/openshift-node/configure_nodes.yml +++ b/playbooks/common/openshift-node/configure_nodes.yml @@ -13,4 +13,5 @@ roles: - role: os_firewall - role: openshift_node + - role: tuned - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml index fc06621ee..30651a1df 100644 --- a/playbooks/common/openshift-node/image_prep.yml +++ b/playbooks/common/openshift-node/image_prep.yml @@ -2,13 +2,13 @@ - name: normalize groups include: ../../byo/openshift-cluster/initialize_groups.yml -- name: run the std_include +- name: evaluate the groups include: ../openshift-cluster/evaluate_groups.yml -- name: run the std_include +- name: initialize the facts include: ../openshift-cluster/initialize_facts.yml -- name: run the std_include +- name: initialize the repositories include: ../openshift-cluster/initialize_openshift_repos.yml - name: run node config setup @@ -19,3 +19,6 @@ - name: Re-enable excluders include: enable_excluders.yml + +- name: Remove any undesired artifacts from build + include: clean_image.yml |