Diffstat (limited to 'playbooks')
217 files changed, 3041 insertions, 2254 deletions
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 07f10d48c..5ed55a817 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -151,6 +151,14 @@ - lbr0 - vlinuxbr - vovsbr + + - name: Remove virtual devices + command: nmcli delete device "{{ item }}" + failed_when: False + with_items: + - tun0 + - docker0 + when: openshift_remove_all | default(true) | bool - shell: atomic uninstall "{{ item }}"-master-api @@ -264,12 +272,30 @@ - "{{ directories.results | default([]) }}" - files + - shell: systemctl daemon-reload + changed_when: False + + - name: restart container-engine + service: name=container-engine state=stopped enabled=no + failed_when: false + register: container_engine + + - name: restart docker + service: name=docker state=stopped enabled=no + failed_when: false + when: not (container_engine | changed) + register: l_docker_restart_docker_in_pb_result + until: not l_docker_restart_docker_in_pb_result | failed + retries: 3 + delay: 30 + - name: Remove remaining files file: path={{ item }} state=absent with_items: - /etc/ansible/facts.d/openshift.fact - /etc/openshift - /etc/openshift-sdn + - /etc/pki/ca-trust/source/anchors/openshift-ca.crt - /etc/sysconfig/atomic-openshift-node - /etc/sysconfig/atomic-openshift-node-dep - /etc/sysconfig/openshift-node-dep @@ -284,23 +310,38 @@ - /etc/systemd/system/origin-node-dep.service - /etc/systemd/system/origin-node.service - /etc/systemd/system/origin-node.service.wants + - /var/lib/docker + + - name: Rebuild ca-trust + command: update-ca-trust + + - name: Reset Docker proxy configuration + lineinfile: + state=absent + dest=/etc/sysconfig/docker + regexp='(NO_PROXY|HTTP_PROXY|HTTPS_PROXY)=.*' + + - name: Reset Docker registry configuration + lineinfile: + state=absent + dest=/etc/sysconfig/docker + regexp='(ADD_REGISTRY|BLOCK_REGISTRY|INSECURE_REGISTRY)=.*' + + - name: Detect Docker storage configuration + shell: vgs -o name | grep docker + register: docker_vg_name + failed_when: false + changed_when: false - - shell: systemctl daemon-reload - changed_when: False + - name: Wipe out Docker storage contents + command: vgremove -f {{ item }} + with_items: "{{ docker_vg_name.stdout_lines }}" + when: docker_vg_name.rc == 0 - - name: restart container-engine - service: name=container-engine state=restarted - failed_when: false - register: container_engine + - name: Wipe out Docker storage configuration + file: path=/etc/sysconfig/docker-storage state=absent + when: docker_vg_name.rc == 0 - - name: restart docker - service: name=docker state=restarted - failed_when: false - when: not (container_engine | changed) - register: l_docker_restart_docker_in_pb_result - until: not l_docker_restart_docker_in_pb_result | failed - retries: 3 - delay: 30 - hosts: masters become: yes @@ -525,3 +566,7 @@ with_items: - /etc/ansible/facts.d/openshift.fact - /var/lib/haproxy/stats + # Here we remove only limits.conf rather than directory, as users may put their files. + # - /etc/systemd/system/haproxy.service.d + - /etc/systemd/system/haproxy.service.d/limits.conf + - /etc/systemd/system/haproxy.service diff --git a/playbooks/aws/BUILD_AMI.md b/playbooks/aws/BUILD_AMI.md new file mode 100644 index 000000000..468264a9a --- /dev/null +++ b/playbooks/aws/BUILD_AMI.md @@ -0,0 +1,21 @@ +# Build AMI + +When seeking to deploy a working openshift cluster using these plays, a few +items must be in place. + +These are: + +1. Create an instance, using a specified ssh key. +2. 
Run openshift-ansible setup roles to ensure packages and services are correctly configured. +3. Create the AMI. +4. If encryption is desired + - A KMS key is created with the name of $clusterid + - An encrypted AMI will be produced with $clusterid KMS key +5. Terminate the instance used to configure the AMI. + +More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption: +``` +# openshift_aws_ami_encrypt: True # defaults to false +``` + +**Note**: This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date. diff --git a/playbooks/aws/PREREQUISITES.md b/playbooks/aws/PREREQUISITES.md new file mode 100644 index 000000000..4f428dcc3 --- /dev/null +++ b/playbooks/aws/PREREQUISITES.md @@ -0,0 +1,40 @@ +# Prerequisites + +When seeking to deploy a working openshift cluster using these plays, a few +items must be in place. + +These are: + +1) vpc +2) security group to build the AMI in. +3) ssh keys to log into instances + +These items can be provisioned ahead of time, or you can utilize the plays here +to create these items. + +If you wish to provision these items yourself, or you already have these items +provisioned and wish to utilize existing components, please refer to +provisioning_vars.yml.example. + +If you wish to have these items created for you, continue with this document. + +# Running prerequisites.yml + +Warning: Running these plays will provision items in your AWS account (if not +present), and you may incur billing charges. These plays are not suitable +for the free-tier. + +## Step 1: +Ensure you have specified all the necessary provisioning variables. See +provisioning_vars.example.yml and README.md for more information. + +## Step 2: +``` +$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml +``` + +This will create a VPC, security group, and ssh_key. These plays are idempotent, +and multiple runs should result in no additional provisioning of these components. + +You can also verify that you will successfully utilize existing components with +these plays. diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index 2b3d4329e..417fb539a 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -8,6 +8,13 @@ With recent desire for provisioning from customers and developers alike, the AWS deploy highly scalable Openshift clusters utilizing AWS auto scale groups and custom AMIs. +To speed in the provisioning of medium and large clusters, openshift-node +instances are created using a pre-built AMI. A list of pre-built AMIs will +be available soon. + +If the deployer wishes to build their own AMI for provisioning, instructions +to do so are provided here. + ### Where do I start? Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways: @@ -31,8 +38,13 @@ Before any provisioning may occur, AWS account credentials must be present in th ### Let's Provision! -The newly added playbooks are the following: -- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain openshift repositories. 
+Warning: Running these plays will provision items in your AWS account (if not +present), and you may incur billing charges. These plays are not suitable +for the free-tier. + +#### High-level overview +- prerequisites.yml - Provision VPC, Security Groups, SSH keys, if needed. See PREREQUISITES.md for more information. +- build_ami.yml - Builds a custom AMI. See BUILD_AMI.md for more information. - provision.yml - Create a vpc, elbs, security groups, launch config, asg's, etc. - install.yml - Calls the openshift-ansible installer on the newly created instances - provision_nodes.yml - Creates the infra and compute node scale groups @@ -41,87 +53,39 @@ The newly added playbooks are the following: The current expected work flow should be to provide an AMI with access to Openshift repositories. There should be a repository specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS custom account settings. +Values specified in provisioning_vars.yml may instead be specified in your inventory group_vars +under the appropriate groups. Most variables can exist in the 'all' group. + ```yaml --- -# when creating an AMI set this to True -# when installing a cluster set this to False -openshift_node_bootstrap: True - -# specify a clusterid -# openshift_aws_clusterid: default - -# specify a region -# openshift_aws_region: us-east-1 - -# must specify a base_ami when building an AMI -# openshift_aws_base_ami: # base image for AMI to build from -# specify when using a custom AMI -# openshift_aws_ami: - -# when creating an encrypted AMI please specify use_encryption -# openshift_aws_ami_encrypt: False - -# custom certificates are required for the ELB -# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt' -# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key' -# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt' - -# This is required for any ec2 instances -# openshift_aws_ssh_key_name: myuser_key - -# This will ensure these users are created -#openshift_aws_users: -#- key_name: myuser_key -# username: myuser -# pub_key: | -# ssh-rsa AAAA +# Minimum mandatory provisioning variables. See provisioning_vars.yml.example. +# for more information. +openshift_deployment_type: # 'origin' or 'openshift-enterprise' +openshift_release: # example: v3.7 +openshift_pkg_version: # example: -3.7.0 +openshift_aws_ssh_key_name: # example: myuser_key +openshift_aws_base_ami: # example: ami-12345678 +# These are required when doing SSL on the ELBs +openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt' +openshift_aws_iam_cert_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key' ``` If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`. -In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. 
Here is an example: - -```ini -[OSEv3:children] -masters -nodes -etcd - -[OSEv3:children] -masters -nodes -etcd - -[OSEv3:vars] -################################################################################ -# Ensure these variables are set for bootstrap -################################################################################ -# openshift_deployment_type is required for installation -openshift_deployment_type=origin - -# required when building an AMI. This will -# be dependent on the version provided by the yum repository -openshift_pkg_version=-3.6.0 - -openshift_master_bootstrap_enabled=True +In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as deploying a cluster +using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example. -openshift_hosted_router_wait=False -openshift_hosted_registry_wait=False - -# Repository for installation -openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}] - -################################################################################ -# cluster specific settings maybe be placed here +There are more examples of cluster inventory settings [`here`](../../inventory/byo/). -[masters] +#### Step 0 (optional) -[etcd] +You may provision a VPC, Security Group, and SSH keypair to build the AMI. -[nodes] +``` +$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml ``` -There are more examples of cluster inventory settings [`here`](../../inventory/byo/). +See PREREQUISITES.md for more information. #### Step 1 @@ -131,24 +95,6 @@ Once the `inventory` and the `provisioning_vars.yml` file has been updated with $ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml ``` -1. This script will build a VPC. Default name will be clusterid if not specified. -2. Create an ssh key required for the instance. -3. Create a security group. -4. Create an instance using the key from step 2 or a specified key. -5. Run openshift-ansible setup roles to ensure packages and services are correctly configured. -6. Create the AMI. -7. If encryption is desired - - A KMS key is created with the name of $clusterid - - An encrypted AMI will be produced with $clusterid KMS key -8. Terminate the instance used to configure the AMI. - -More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption: -``` -# openshift_aws_ami_encrypt: True # defaults to false -``` - -**Note**: This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date. 
- #### Step 2 Now that we have created an AMI for our Openshift installation, there are two ways to use the AMI. @@ -172,16 +118,14 @@ $ ansible-playbook provision.yml -e @provisioning_vars.yml ``` This playbook runs through the following steps: -1. Ensures a VPC is created. -2. Ensures a SSH key exists. -3. Creates an s3 bucket for the registry named $clusterid-docker-registry -4. Create master security groups. -5. Create a master launch config. -6. Create the master auto scaling groups. -7. If certificates are desired for ELB, they will be uploaded. -8. Create internal and external master ELBs. -9. Add newly created masters to the correct groups. -10. Set a couple of important facts for the masters. +1. Creates an s3 bucket for the registry named $clusterid-docker-registry +2. Create master security groups. +3. Create a master launch config. +4. Create the master auto scaling groups. +5. If certificates are desired for ELB, they will be uploaded. +6. Create internal and external master ELBs. +7. Add newly created masters to the correct groups. +8. Set a couple of important facts for the masters. At this point we have successfully created the infrastructure including the master nodes. @@ -200,13 +144,13 @@ Once this playbook completes, the cluster masters should be installed and config #### Step 5 -Now that we have a cluster deployed it will be more interesting to create some node types. This can be done easily with the following playbook: +Now that we have the cluster masters deployed, we need to deploy our infrastructure and compute nodes: ``` $ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml ``` -Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator. +Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator in Step 6. 
#### Step 6 diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml index ffc367f9f..c2c8bea50 100755 --- a/playbooks/aws/openshift-cluster/accept.yml +++ b/playbooks/aws/openshift-cluster/accept.yml @@ -42,12 +42,12 @@ until: "'instances' in instancesout and instancesout.instances|length > 0" - debug: - msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}" + msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" - name: approve nodes oc_adm_csr: #approve_all: True - nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}" - timeout: 0 + nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" + timeout: 60 register: nodeout delegate_to: "{{ mastersout.instances[0].public_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml index 1e54f0467..fae30eb0a 100644 --- a/playbooks/aws/openshift-cluster/build_ami.yml +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -17,71 +17,24 @@ - name: openshift_aws_region msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - - name: create an instance and prepare for ami - include_role: - name: openshift_aws - tasks_from: build_ami.yml - vars: - openshift_aws_node_group_type: compute - - - name: fetch newly created instances - ec2_remote_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - filters: - "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}" - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: instancesout.instances|length > 0 - - - name: wait for ssh to become available - wait_for: - port: 22 - host: "{{ instancesout.instances[0].public_ip_address }}" - timeout: 300 - search_regex: OpenSSH - - - name: add host to nodes - add_host: - groups: nodes - name: "{{ instancesout.instances[0].public_dns_name }}" +- include: provision_instance.yml + vars: + openshift_aws_node_group_type: compute - hosts: nodes gather_facts: False tasks: - name: set the user to perform installation set_fact: - ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}" - -- name: normalize groups - include: ../../byo/openshift-cluster/initialize_groups.yml - -- name: run the std_include - include: ../../common/openshift-cluster/evaluate_groups.yml - -- name: run the std_include - include: ../../common/openshift-cluster/initialize_facts.yml - -- name: run the std_include - include: ../../common/openshift-cluster/initialize_openshift_repos.yml - -- name: run node config setup - include: ../../common/openshift-node/setup.yml - -- name: run node config - include: ../../common/openshift-node/configure_nodes.yml - -- name: Re-enable excluders - include: ../../common/openshift-node/enable_excluders.yml - -- hosts: localhost - connection: local - become: no - tasks: - - name: seal the ami - include_role: - name: openshift_aws - tasks_from: seal_ami.yml - vars: - openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" + ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}" + openshift_node_bootstrap: True + openshift_node_image_prep_packages: + - cloud-utils-growpart + +# This is the part that installs all of the software and configs for the instance +# to become a node. 
+- include: ../../common/openshift-node/image_prep.yml + +- include: seal_ami.yml + vars: + openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml new file mode 100644 index 000000000..db6e3b8e1 --- /dev/null +++ b/playbooks/aws/openshift-cluster/hosted.yml @@ -0,0 +1,22 @@ +--- +- include: ../../common/openshift-cluster/openshift_hosted.yml + +- include: ../../common/openshift-cluster/openshift_metrics.yml + when: openshift_metrics_install_metrics | default(false) | bool + +- include: ../../common/openshift-cluster/openshift_logging.yml + when: openshift_logging_install_logging | default(false) | bool + +- include: ../../common/openshift-cluster/service_catalog.yml + when: openshift_enable_service_catalog | default(false) | bool + +- include: ../../common/openshift-management/config.yml + when: openshift_management_install_management | default(false) | bool + +- name: Print deprecated variable warning message if necessary + hosts: oo_first_master + gather_facts: no + tasks: + - debug: msg="{{__deprecation_message}}" + when: + - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml index 86d58a68e..874d2d38a 100644 --- a/playbooks/aws/openshift-cluster/install.yml +++ b/playbooks/aws/openshift-cluster/install.yml @@ -1,74 +1,46 @@ --- -- name: Setup the vpc and the master node group +- name: Setup the master node group hosts: localhost tasks: - - name: Alert user to variables needed - clusterid - debug: - msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" - - - name: Alert user to variables needed - region - debug: - msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - - - name: fetch newly created instances - ec2_remote_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - filters: - "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" - "tag:host-type": master - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: instancesout.instances|length > 0 - - - name: add new master to masters group - add_host: - groups: "masters,etcd,nodes" - name: "{{ item.public_ip_address }}" - hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}" - with_items: "{{ instancesout.instances }}" - - - name: wait for ssh to become available - wait_for: - port: 22 - host: "{{ item.public_ip_address }}" - timeout: 300 - search_regex: OpenSSH - with_items: "{{ instancesout.instances }}" + - include_role: + name: openshift_aws + tasks_from: setup_master_group.yml - name: set the master facts for hostname to elb hosts: masters gather_facts: no remote_user: root tasks: - - name: fetch elbs - ec2_elb_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - names: - - "{{ item }}" - with_items: - - "{{ openshift_aws_clusterid | default('default') }}-master-external" - - "{{ openshift_aws_clusterid | default('default') }}-master-internal" - delegate_to: localhost - register: elbs + - include_role: + name: openshift_aws + tasks_from: master_facts.yml + +- name: run the init + include: ../../init/main.yml + +- name: perform the installer openshift-checks + include: ../../openshift-checks/private/install.yml + +- name: etcd install + include: ../../common/openshift-etcd/config.yml + +- name: include nfs + include: 
../../common/openshift-nfs/config.yml + when: groups.oo_nfs_to_config | default([]) | count > 0 - - debug: var=elbs +- name: include loadbalancer + include: ../../common/openshift-loadbalancer/config.yml + when: groups.oo_lb_to_config | default([]) | count > 0 - - name: set fact - set_fact: - openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" - osm_custom_cors_origins: - - "{{ elbs.results[1].elbs[0].dns_name }}" - - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com" - - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" - with_items: "{{ groups['masters'] }}" +- name: include openshift-master config + include: ../../common/openshift-master/config.yml -- name: normalize groups - include: ../../byo/openshift-cluster/initialize_groups.yml +- name: include master additional config + include: ../../common/openshift-master/additional_config.yml -- name: run the std_include - include: ../../common/openshift-cluster/std_include.yml +- name: include master additional config + include: ../../common/openshift-node/config.yml -- name: run the config - include: ../../common/openshift-cluster/config.yml +- name: include openshift-glusterfs + include: ../../common/openshift-glusterfs/config.yml + when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml new file mode 100644 index 000000000..f5eb01b14 --- /dev/null +++ b/playbooks/aws/openshift-cluster/prerequisites.yml @@ -0,0 +1,6 @@ +--- +- include: provision_vpc.yml + +- include: provision_ssh_keypair.yml + +- include: provision_sec_group.yml diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml index 8f018abd0..4b5bd22ea 100644 --- a/playbooks/aws/openshift-cluster/provision.yml +++ b/playbooks/aws/openshift-cluster/provision.yml @@ -1,5 +1,5 @@ --- -- name: Setup the vpc and the master node group +- name: Setup the elb and the master node group hosts: localhost tasks: diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml index e787deced..78dd6a49b 100644 --- a/playbooks/aws/openshift-cluster/provision_install.yml +++ b/playbooks/aws/openshift-cluster/provision_install.yml @@ -6,11 +6,14 @@ - name: Include the provision.yml playbook to create cluster include: provision.yml -- name: Include the install.yml playbook to install cluster +- name: Include the install.yml playbook to install cluster on masters include: install.yml -- name: Include the install.yml playbook to install cluster +- name: provision the infra/compute playbook to install node resources include: provision_nodes.yml - name: Include the accept.yml playbook to accept nodes into the cluster include: accept.yml + +- name: Include the hosted.yml playbook to finish the hosted configuration + include: hosted.yml diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml new file mode 100644 index 000000000..6e843453c --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_instance.yml @@ -0,0 +1,12 @@ +--- +# If running this play directly, be sure the variable +# 'openshift_aws_node_group_type' is set correctly for your usage. +# See build_ami.yml for an example. 
+- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: create an instance and prepare for ami + include_role: + name: openshift_aws + tasks_from: provision_instance.yml diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml new file mode 100644 index 000000000..7d74a691a --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml @@ -0,0 +1,13 @@ +--- +# If running this play directly, be sure the variable +# 'openshift_aws_node_group_type' is set correctly for your usage. +# See build_ami.yml for an example. +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: create security groups + include_role: + name: openshift_aws + tasks_from: security_group.yml + when: openshift_aws_create_security_groups | default(True) | bool diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml new file mode 100644 index 000000000..3ec683958 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml @@ -0,0 +1,12 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: create an instance and prepare for ami + include_role: + name: openshift_aws + tasks_from: ssh_keys.yml + vars: + openshift_aws_node_group_type: compute + when: openshift_aws_users | default([]) | length > 0 diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml new file mode 100644 index 000000000..0a23a6d32 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_vpc.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: create a vpc + include_role: + name: openshift_aws + tasks_from: vpc.yml + when: openshift_aws_create_vpc | default(True) | bool diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml deleted file mode 100644 index 28eb9c993..000000000 --- a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# when creating an AMI set this option to True -# when installing the cluster, set this to False -openshift_node_bootstrap: True - -# specify a clusterid -#openshift_aws_clusterid: default - -# must specify a base_ami when building an AMI -#openshift_aws_base_ami: - -# when creating an encrypted AMI please specify use_encryption -#openshift_aws_ami_encrypt: False - -# custom certificates are required for the ELB -#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt' -#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key' -#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt' - -# This is required for any ec2 instances -#openshift_aws_ssh_key_name: myuser_key - -# This will ensure these users are created -#openshift_aws_users: -#- key_name: myuser_key -# username: myuser -# pub_key: | -# ssh-rsa AAAA diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml new file mode 100644 index 000000000..8239a64fb --- /dev/null +++ b/playbooks/aws/openshift-cluster/seal_ami.yml @@ -0,0 +1,12 @@ +--- +# If running this play directly, be sure the variable +# 'openshift_aws_ami_name' is set correctly for your usage. +# See build_ami.yml for an example. 
+- hosts: localhost + connection: local + become: no + tasks: + - name: seal the ami + include_role: + name: openshift_aws + tasks_from: seal_ami.yml diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini new file mode 100644 index 000000000..238a7eb2f --- /dev/null +++ b/playbooks/aws/provisioning-inventory.example.ini @@ -0,0 +1,25 @@ +[OSEv3:children] +masters +nodes +etcd + +[OSEv3:vars] +################################################################################ +# Ensure these variables are set for bootstrap +################################################################################ +# openshift_deployment_type is required for installation +openshift_deployment_type=origin + +openshift_master_bootstrap_enabled=True + +openshift_hosted_router_wait=False +openshift_hosted_registry_wait=False + +################################################################################ +# cluster specific settings maybe be placed here + +[masters] + +[etcd] + +[nodes] diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example new file mode 100644 index 000000000..1491fb868 --- /dev/null +++ b/playbooks/aws/provisioning_vars.yml.example @@ -0,0 +1,120 @@ +--- +# Variables that are commented in this file are optional; uncommented variables +# are mandatory. + +# Default values for each variable are provided, as applicable. +# Example values for mandatory variables are provided as a comment at the end +# of the line. + +# ------------------------ # +# Common/Cluster Variables # +# ------------------------ # +# Variables in this section affect all areas of the cluster + +# Deployment type must be specified. +openshift_deployment_type: # 'origin' or 'openshift-enterprise' + +# openshift_release must be specified. Use whatever version of openshift +# that is supported by openshift-ansible that you wish. +openshift_release: # v3.7 + +# This will be dependent on the version provided by the yum repository +openshift_pkg_version: # -3.7.0 + +# specify a clusterid +# This value is also used as the default value for many other components. +#openshift_aws_clusterid: default + +# AWS region +# This value will instruct the plays where all items should be created. +# Multi-region deployments are not supported using these plays at this time. +#openshift_aws_region: us-east-1 + +#openshift_aws_create_launch_config: true +#openshift_aws_create_scale_group: true + +# --- # +# VPC # +# --- # + +# openshift_aws_create_vpc defaults to true. If you don't wish to provision +# a vpc, set this to false. +#openshift_aws_create_vpc: true + +# Name of the vpc. Needs to be set if using a pre-existing vpc. +#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" + +# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing +# vpc + subnet. +#openshift_aws_subnet_name: + +# -------------- # +# Security Group # +# -------------- # + +# openshift_aws_create_security_groups defaults to true. If you wish to use +# an existing security group, set this to false. +#openshift_aws_create_security_groups: true + +# openshift_aws_build_ami_group is the name of the security group to build the +# ami in. This defaults to the value of openshift_aws_clusterid. +#openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" + +# openshift_aws_launch_config_security_groups specifies the security groups to +# apply to the launch config. 
The launch config security groups will be what +# the cluster actually is deployed in. +#openshift_aws_launch_config_security_groups: see roles/openshift_aws/defaults.yml + +# openshift_aws_node_security_groups are created when +# openshift_aws_create_security_groups is set to true. +#openshift_aws_node_security_groups: see roles/openshift_aws/defaults.yml + +# -------- # +# ssh keys # +# -------- # + +# Specify the key pair name here to connect to the provisioned instances. This +# can be an existing key, or it can be one of the keys specified in +# openshift_aws_users +openshift_aws_ssh_key_name: # myuser_key + +# This will ensure these user and public keys are created. +#openshift_aws_users: +#- key_name: myuser_key +# username: myuser +# pub_key: | +# ssh-rsa AAAA + +# When building the AMI, specify the user to ssh to the instance as. +# openshift_aws_build_ami_ssh_user: root + +# --------- # +# AMI Build # +# --------- # +# Variables in this section apply to building a node AMI for use in your +# openshift cluster. + +# must specify a base_ami when building an AMI +openshift_aws_base_ami: # ami-12345678 + +# when creating an encrypted AMI please specify use_encryption +#openshift_aws_ami_encrypt: False + +# -- # +# S3 # +# -- # + +# Create an s3 bucket. +#openshift_aws_create_s3: True + +# --- # +# ELB # +# --- # + +# openshift_aws_elb_name will be the base-name of the ELBs. +#openshift_aws_elb_name: "{{ openshift_aws_clusterid }}" + +# custom certificates are required for the ELB +openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt' +openshift_aws_iam_cert_key_path: # '/path/to/wildcard.<clusterid>.example.com.key' +openshift_aws_iam_cert_chain_path: # '/path/to/cert.ca.crt' diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml deleted file mode 100644 index 0e8e7a94d..000000000 --- a/playbooks/byo/openshift-cfme/config.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- include: ../openshift-cluster/initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/evaluate_groups.yml - -- include: ../../common/openshift-cfme/config.yml diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml deleted file mode 100644 index c8ed16859..000000000 --- a/playbooks/byo/openshift-cfme/uninstall.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# - include: ../openshift-cluster/initialize_groups.yml -# tags: -# - always - -- include: ../../common/openshift-cfme/uninstall.yml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/roles b/playbooks/byo/openshift-checks/certificate_expiry/roles deleted file mode 120000 index 4bdbcbad3..000000000 --- a/playbooks/byo/openshift-checks/certificate_expiry/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml deleted file mode 100644 index 96a71e4dc..000000000 --- a/playbooks/byo/openshift-checks/health.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml - -- include: ../../common/openshift-checks/health.yml diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml deleted file mode 100644 index dd93df0bb..000000000 --- a/playbooks/byo/openshift-checks/pre-install.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml - -- include: ../../common/openshift-checks/pre-install.yml diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 60fa44c5b..57823847b 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -1,12 +1,4 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/config.yml - vars: - openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml index 9ce8f0d3c..b429e84e5 100644 --- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml @@ -1,4 +1,4 @@ --- -- include: initialize_groups.yml +- include: ../../init/evaluate_groups.yml - include: ../../common/openshift-cluster/enable_dnsmasq.yml diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml deleted file mode 100644 index 2a725510a..000000000 --- a/playbooks/byo/openshift-cluster/initialize_groups.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: cluster_hosts.yml diff --git a/playbooks/byo/openshift-cluster/openshift-hosted.yml b/playbooks/byo/openshift-cluster/openshift-hosted.yml index edd4c8d7b..0855a2fb6 100644 --- a/playbooks/byo/openshift-cluster/openshift-hosted.yml +++ b/playbooks/byo/openshift-cluster/openshift-hosted.yml @@ -1,6 +1,4 @@ --- -- include: initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-cluster/openshift_hosted.yml diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml index a523bb47f..74e186f33 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/byo/openshift-cluster/openshift-logging.yml @@ -4,12 +4,6 @@ # Hosted logging on. See inventory/byo/hosts.*.example for the # currently supported method. 
# -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/openshift_logging.yml diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml index 1135c8c11..e4c9ba1f7 100644 --- a/playbooks/byo/openshift-cluster/openshift-metrics.yml +++ b/playbooks/byo/openshift-cluster/openshift-metrics.yml @@ -1,10 +1,4 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/openshift_metrics.yml diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml index 4d3f7f42c..634ee2a80 100644 --- a/playbooks/byo/openshift-cluster/openshift-prometheus.yml +++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml @@ -1,6 +1,4 @@ --- -- include: initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-cluster/openshift_prometheus.yml diff --git a/playbooks/byo/openshift-cluster/openshift-provisioners.yml b/playbooks/byo/openshift-cluster/openshift-provisioners.yml index 8e80f158b..0949d33c9 100644 --- a/playbooks/byo/openshift-cluster/openshift-provisioners.yml +++ b/playbooks/byo/openshift-cluster/openshift-provisioners.yml @@ -1,6 +1,4 @@ --- -- include: initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-cluster/openshift_provisioners.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index 255b0dbf7..6450a4d76 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -1,11 +1,5 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml vars: @@ -42,3 +36,7 @@ - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml when: openshift_hosted_manage_registry | default(true) | bool + +- include: ../../common/openshift-master/revert-client-ca.yml + +- include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml index 29f821eda..77dd121b3 100644 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml @@ -1,10 +1,4 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml index f4f2ce00d..f3892f56f 100644 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml @@ -1,11 +1,5 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: 
../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/redeploy-certificates/check-expiry.yml vars: diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml index 049bad8e7..6a4528b7f 100644 --- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml @@ -1,11 +1,5 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/redeploy-certificates/masters-backup.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml index 345b0c689..891c0e58c 100644 --- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml @@ -1,11 +1,5 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/redeploy-certificates/nodes-backup.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml index 6e11a111b..9cebeb1ee 100644 --- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml +++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml @@ -1,10 +1,4 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml index 30feabab3..36b6250a7 100644 --- a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml @@ -1,10 +1,4 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml index 2630fb234..181e03381 100644 --- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml @@ -1,10 +1,4 @@ --- -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/redeploy-certificates/router.yml diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml index 40a7606e7..7be63b0a5 100644 --- a/playbooks/byo/openshift-cluster/service-catalog.yml +++ b/playbooks/byo/openshift-cluster/service-catalog.yml @@ -4,12 +4,6 @@ # Hosted logging on. See inventory/byo/hosts.*.example for the # currently supported method. 
# -- include: initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-cluster/service_catalog.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md index 0f64f40f3..d9b1fc2ca 100644 --- a/playbooks/byo/openshift-cluster/upgrades/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/README.md @@ -4,6 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are provided in their respective directories. # Upgrades available +- [OpenShift Container Platform 3.6 to 3.7](v3_7/README.md) (works also to upgrade OpenShift Origin from 3.6.x to 3.7.x) - [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x) -- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x) -- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x) diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml index 7f31e26e1..c46b22331 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -1,5 +1,5 @@ --- # Playbook to upgrade Docker to the max allowable version for an OpenShift cluster. -- include: ../../initialize_groups.yml +- include: ../../../../init/evaluate_groups.yml - include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml index 5bd5d64ab..a9be8dec4 100644 --- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml +++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml @@ -1,6 +1,4 @@ --- -- include: ../initialize_groups.yml - -- include: ../../../common/openshift-cluster/evaluate_groups.yml +- include: ../../../init/evaluate_groups.yml - include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md deleted file mode 100644 index 6892f6324..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# v3.3 Major and Minor Upgrade Playbook - -## Overview -This playbook currently performs the -following steps. - - * Upgrade and restart master services - * Unschedule node. 
- * Upgrade and restart docker - * Upgrade and restart node services - * Modifies the subset of the configuration necessary - * Applies the latest cluster policies - * Updates the default router if one exists - * Updates the default registry if one exists - * Updates image streams and quickstarts - -## Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml deleted file mode 100644 index 697a18c4d..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml deleted file mode 100644 index 180a2821f..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml deleted file mode 100644 index 8cce91b3f..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml deleted file mode 100644 index 8e5d0f5f9..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml deleted file mode 100644 index d5329b858..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. 
-# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md deleted file mode 100644 index 53eebe65e..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# v3.5 Major and Minor Upgrade Playbook - -## Overview -This playbook currently performs the -following steps. - - * Upgrade and restart master services - * Unschedule node. - * Upgrade and restart docker - * Upgrade and restart node services - * Modifies the subset of the configuration necessary - * Applies the latest cluster policies - * Updates the default router if one exists - * Updates the default registry if one exists - * Updates image streams and quickstarts - -## Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml deleted file mode 100644 index f44d55ad2..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml deleted file mode 100644 index 2377713fa..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml index 40120b3e8..c880fe7f7 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -2,6 +2,4 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../../initialize_groups.yml - - include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 408a4c631..aeec5f5cc 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -11,6 +11,4 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
# -- include: ../../initialize_groups.yml - - include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index b5f42b804..4664a9a2b 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -4,6 +4,4 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # -- include: ../../initialize_groups.yml - - include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md index 4bf53be81..914e0f5b2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md @@ -1,4 +1,4 @@ -# v3.6 Major and Minor Upgrade Playbook +# v3.7 Major and Minor Upgrade Playbook ## Overview This playbook currently performs the following steps. diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml index e41c29682..cbb89bc4d 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -2,6 +2,4 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../../initialize_groups.yml - - include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index 21e0fd815..1adfbdec0 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -11,6 +11,4 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # -- include: ../../initialize_groups.yml - - include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index 0e09d996e..b4da18281 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -4,6 +4,4 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # -- include: ../../initialize_groups.yml - - include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md index 85b807dc6..d9be6ae3b 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md @@ -1,11 +1,10 @@ -# v3.4 Major and Minor Upgrade Playbook +# v3.6 Major and Minor Upgrade Playbook ## Overview -This playbook currently performs the -following steps. +This playbook currently performs the following steps. * Upgrade and restart master services - * Unschedule node. + * Unschedule node * Upgrade and restart docker * Upgrade and restart node services * Modifies the subset of the configuration necessary @@ -15,4 +14,7 @@ following steps. 
* Updates image streams and quickstarts ## Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml new file mode 100644 index 000000000..f7e5dd1d2 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -0,0 +1,5 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index 4d284c279..cc04d81c1 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -11,6 +11,4 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index 5b3f6ab06..37a9f69bb 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -4,6 +4,4 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. 
# -- include: ../../initialize_groups.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-etcd/certificates.yml b/playbooks/byo/openshift-etcd/certificates.yml index e35cf243f..0e9d42cd6 100644 --- a/playbooks/byo/openshift-etcd/certificates.yml +++ b/playbooks/byo/openshift-etcd/certificates.yml @@ -1,7 +1,5 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-etcd/ca.yml diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml index 1342bd60c..c6e0a9d90 100644 --- a/playbooks/byo/openshift-etcd/config.yml +++ b/playbooks/byo/openshift-etcd/config.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-etcd/config.yml diff --git a/playbooks/byo/openshift-etcd/embedded2external.yml b/playbooks/byo/openshift-etcd/embedded2external.yml new file mode 100644 index 000000000..492f677b0 --- /dev/null +++ b/playbooks/byo/openshift-etcd/embedded2external.yml @@ -0,0 +1,4 @@ +--- +- include: ../../init/main.yml + +- include: ../../common/openshift-etcd/embedded2external.yml diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml index 2dec2bef6..3020e7db4 100644 --- a/playbooks/byo/openshift-etcd/migrate.yml +++ b/playbooks/byo/openshift-etcd/migrate.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-etcd/migrate.yml diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml index 034bba4b4..0889d91ba 100644 --- a/playbooks/byo/openshift-etcd/restart.yml +++ b/playbooks/byo/openshift-etcd/restart.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-etcd/restart.yml diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml index a2a5856a9..e7d62e264 100644 --- a/playbooks/byo/openshift-etcd/scaleup.yml +++ b/playbooks/byo/openshift-etcd/scaleup.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-etcd/scaleup.yml diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml index 3f11f3991..120476bb8 100644 --- a/playbooks/byo/openshift-glusterfs/config.yml +++ b/playbooks/byo/openshift-glusterfs/config.yml @@ -1,10 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-glusterfs/config.yml diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml index 6ee6febdb..32734f863 100644 --- a/playbooks/byo/openshift-glusterfs/registry.yml +++ b/playbooks/byo/openshift-glusterfs/registry.yml @@ -1,10 +1,4 
@@ --- -- include: ../openshift-cluster/initialize_groups.yml - tags: - - always - -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../../init/main.yml - include: ../../common/openshift-glusterfs/registry.yml diff --git a/playbooks/byo/openshift-loadbalancer/config.yml b/playbooks/byo/openshift-loadbalancer/config.yml index 32c828f97..27925d2e9 100644 --- a/playbooks/byo/openshift-loadbalancer/config.yml +++ b/playbooks/byo/openshift-loadbalancer/config.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-loadbalancer/config.yml diff --git a/playbooks/byo/openshift-management/add_container_provider.yml b/playbooks/byo/openshift-management/add_container_provider.yml new file mode 100644 index 000000000..e2f558550 --- /dev/null +++ b/playbooks/byo/openshift-management/add_container_provider.yml @@ -0,0 +1,4 @@ +--- +- include: ../../init/evaluate_groups.yml + +- include: ../../common/openshift-management/add_container_provider.yml diff --git a/playbooks/byo/openshift-management/add_many_container_providers.yml b/playbooks/byo/openshift-management/add_many_container_providers.yml new file mode 100644 index 000000000..62fdb11c5 --- /dev/null +++ b/playbooks/byo/openshift-management/add_many_container_providers.yml @@ -0,0 +1,36 @@ +--- +- hosts: localhost + tasks: + - name: Ensure the container provider configuration is defined + assert: + that: container_providers_config is defined + msg: | + Error: Must provide providers config path. Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command + + - name: Include providers/management configuration + include_vars: + file: "{{ container_providers_config }}" + + - name: Ensure this cluster is a container provider + uri: + url: "https://{{ management_server['hostname'] }}/api/providers" + body_format: json + method: POST + user: "{{ management_server['user'] }}" + password: "{{ management_server['password'] }}" + validate_certs: no + # Docs on formatting the BODY of the POST request: + # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations + body: "{{ item }}" + failed_when: false + with_items: "{{ container_providers }}" + register: results + + # Include openshift_management for access to filter_plugins. 
+ - include_role: + name: openshift_management + tasks_from: noop + + - name: print each result + debug: + msg: "{{ results.results | oo_filter_container_providers }}" diff --git a/playbooks/byo/openshift-management/config.yml b/playbooks/byo/openshift-management/config.yml new file mode 100644 index 000000000..e699fd014 --- /dev/null +++ b/playbooks/byo/openshift-management/config.yml @@ -0,0 +1,4 @@ +--- +- include: ../../init/main.yml + +- include: ../../common/openshift-management/config.yml diff --git a/playbooks/byo/openshift-checks/roles b/playbooks/byo/openshift-management/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/byo/openshift-checks/roles +++ b/playbooks/byo/openshift-management/roles diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml new file mode 100644 index 000000000..e95c1c88a --- /dev/null +++ b/playbooks/byo/openshift-management/uninstall.yml @@ -0,0 +1,2 @@ +--- +- include: ../../common/openshift-management/uninstall.yml diff --git a/playbooks/byo/openshift-master/additional_config.yml b/playbooks/byo/openshift-master/additional_config.yml index b3d7b5731..1454190b2 100644 --- a/playbooks/byo/openshift-master/additional_config.yml +++ b/playbooks/byo/openshift-master/additional_config.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-master/additional_config.yml diff --git a/playbooks/byo/openshift-master/certificates.yml b/playbooks/byo/openshift-master/certificates.yml index 26b964034..344985244 100644 --- a/playbooks/byo/openshift-master/certificates.yml +++ b/playbooks/byo/openshift-master/certificates.yml @@ -1,8 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml - -- include: ../../common/openshift-master/ca.yml +- include: ../../init/main.yml - include: ../../common/openshift-master/certificates.yml diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml index 98be0c448..913525e65 100644 --- a/playbooks/byo/openshift-master/config.yml +++ b/playbooks/byo/openshift-master/config.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-master/config.yml diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 8950efd00..d2031d928 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index a09edd55a..4fdec5e7d 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -1,5 +1,5 @@ --- -- include: ../openshift-cluster/initialize_groups.yml +- include: ../../init/evaluate_groups.yml - name: Ensure there are new_masters or new_nodes hosts: localhost @@ -13,8 +13,11 @@ add hosts to the new_masters and new_nodes host groups to add masters. 
when: - - (g_new_master_hosts | default([]) | length == 0) and (g_new_node_hosts | default([]) | length == 0) + - g_new_master_hosts | default([]) | length == 0 + - g_new_node_hosts | default([]) | length == 0 -- include: ../../common/openshift-cluster/std_include.yml +# Need a better way to do the above check for node without +# running evaluate_groups and init/main.yml +- include: ../../init/main.yml - include: ../../common/openshift-master/scaleup.yml diff --git a/playbooks/byo/openshift-nfs/config.yml b/playbooks/byo/openshift-nfs/config.yml index 93b24411e..a5e6fe784 100644 --- a/playbooks/byo/openshift-nfs/config.yml +++ b/playbooks/byo/openshift-nfs/config.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-nfs/config.yml diff --git a/playbooks/byo/openshift-node/certificates.yml b/playbooks/byo/openshift-node/certificates.yml index 3d2de74a9..5342cf46b 100644 --- a/playbooks/byo/openshift-node/certificates.yml +++ b/playbooks/byo/openshift-node/certificates.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-node/certificates.yml diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml index 839dc36ff..b157b13d6 100644 --- a/playbooks/byo/openshift-node/config.yml +++ b/playbooks/byo/openshift-node/config.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-node/config.yml diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml index b23692237..3d96cbdd5 100644 --- a/playbooks/byo/openshift-node/network_manager.yml +++ b/playbooks/byo/openshift-node/network_manager.yml @@ -1,4 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml +- include: ../../init/evaluate_groups.yml - include: ../../common/openshift-node/network_manager.yml diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml index ccf9e82da..53a6b9ed4 100644 --- a/playbooks/byo/openshift-node/restart.yml +++ b/playbooks/byo/openshift-node/restart.yml @@ -1,6 +1,4 @@ --- -- include: ../openshift-cluster/initialize_groups.yml - -- include: ../../common/openshift-cluster/std_include.yml +- include: ../../init/main.yml - include: ../../common/openshift-node/restart.yml diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index 9f992cca6..bf1bbcf46 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -1,5 +1,5 @@ --- -- include: ../openshift-cluster/initialize_groups.yml +- include: ../../init/evaluate_groups.yml - name: Ensure there are new_nodes hosts: localhost @@ -14,8 +14,8 @@ when: - g_new_node_hosts | default([]) | length == 0 -- include: ../../common/openshift-cluster/std_include.yml - -- include: ../../common/openshift-node/certificates.yml +# Need a better way to do the above check for node without +# running evaluate_groups and init/main.yml +- include: ../../init/main.yml - include: ../../common/openshift-node/config.yml diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml 
deleted file mode 100644 index 2e53452a6..000000000 --- a/playbooks/byo/openshift-preflight/check.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# location is moved; this file remains so existing instructions keep working -- include: ../openshift-checks/pre-install.yml diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index a8c1c3a88..29e0ebe8d 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -1,19 +1,13 @@ --- -- include: openshift-cluster/initialize_groups.yml - tags: - - always - -- include: ../common/openshift-cluster/std_include.yml - tags: - - always +- include: ../init/main.yml - name: Gather Cluster facts - # Temporarily reverting to OSEv3 until group standardization is complete - hosts: OSEv3 + hosts: oo_all_hosts roles: - openshift_facts tasks: - openshift_facts: openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}" register: result - - debug: var=result + - debug: + var: result diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index bc3109a31..261143080 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -1,11 +1,8 @@ --- -- include: openshift-cluster/initialize_groups.yml - tags: - - always +- include: ../init/evaluate_groups.yml - name: Subscribe hosts, update repos and update OS packages - # Temporarily reverting to OSEv3 until group standardization is complete - hosts: OSEv3 + hosts: oo_all_hosts roles: - role: rhel_subscribe when: diff --git a/playbooks/certificate_expiry b/playbooks/certificate_expiry deleted file mode 120000 index 9cf5334a1..000000000 --- a/playbooks/certificate_expiry +++ /dev/null @@ -1 +0,0 @@ -byo/openshift-checks/certificate_expiry/
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml deleted file mode 100644 index 533a35d9e..000000000 --- a/playbooks/common/openshift-cfme/config.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# TODO: Make this work. The 'name' variable below is undefined -# presently because it's part of the cfme role. This play can't run -# until that's re-worked. -# -# - name: Pre-Pull manageiq-pods docker images -# hosts: nodes -# tasks: -# - name: Ensure the latest manageiq-pods docker image is pulling -# docker_image: -# name: "{{ openshift_cfme_container_image }}" -# # Fire-and-forget method, never timeout -# async: 99999999999 -# # F-a-f, never check on this. True 'background' task. -# poll: 0 - -- name: Configure Masters for CFME Bulk Image Imports - hosts: oo_masters_to_config - serial: 1 - tasks: - - name: Run master cfme tuning playbook - include_role: - name: openshift_cfme - tasks_from: tune_masters - -- name: Setup CFME - hosts: oo_first_master - vars: - r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}" - pre_tasks: - - name: Create a temporary place to evaluate the PV templates - command: mktemp -d /tmp/openshift-ansible-XXXXXXX - register: r_openshift_cfme_mktemp - changed_when: false - - name: Ensure the server template was read from disk - debug: - msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}" - - tasks: - - name: Run the CFME Setup Role - include_role: - name: openshift_cfme - vars: - template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}" diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml index 5ddafdb07..359132dd0 100644 --- a/playbooks/common/openshift-cluster/cockpit-ui.yml +++ b/playbooks/common/openshift-cluster/cockpit-ui.yml @@ -3,4 +3,6 @@ hosts: oo_first_master roles: - role: cockpit-ui - when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) + when: + - openshift_hosted_manage_registry | default(true) | bool + - not openshift.docker.hosted_registry_insecure | default(false) | bool diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 4ca0d48e4..588291878 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,26 +1,5 @@ --- -# TODO: refactor this into its own include -# and pass a variable for ctx -- name: Verify Requirements - hosts: oo_all_hosts - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: install - post_tasks: - - action: openshift_health_check - args: - checks: - - disk_availability - - memory_availability - - package_availability - - package_version - - docker_image_availability - - docker_storage - -- include: ../openshift-etcd/ca.yml - -- include: ../openshift-etcd/certificates.yml +- include: ../../openshift-checks/private/install.yml - include: ../openshift-etcd/config.yml @@ -30,16 +9,10 @@ - include: ../openshift-loadbalancer/config.yml when: groups.oo_lb_to_config | default([]) | count > 0 -- include: ../openshift-master/ca.yml - -- include: ../openshift-master/certificates.yml - - include: ../openshift-master/config.yml - include: ../openshift-master/additional_config.yml -- include: 
../openshift-node/certificates.yml - - include: ../openshift-node/config.yml - include: ../openshift-glusterfs/config.yml @@ -54,7 +27,10 @@ when: openshift_logging_install_logging | default(false) | bool - include: service_catalog.yml - when: openshift_enable_service_catalog | default(false) | bool + when: openshift_enable_service_catalog | default(true) | bool + +- include: ../openshift-management/config.yml + when: openshift_management_install_management | default(false) | bool - name: Print deprecated variable warning message if necessary hosts: oo_first_master diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml index be14b06f0..f91361b67 100644 --- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -1,13 +1,8 @@ --- -- include: evaluate_groups.yml - - name: Load openshift_facts hosts: oo_masters_to_config:oo_nodes_to_config roles: - openshift_facts - post_tasks: - - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1" - when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool - name: Reconfigure masters to listen on our new dns_port hosts: oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/install_docker_gc.yml b/playbooks/common/openshift-cluster/install_docker_gc.yml new file mode 100644 index 000000000..1e3dfee07 --- /dev/null +++ b/playbooks/common/openshift-cluster/install_docker_gc.yml @@ -0,0 +1,7 @@ +--- +- name: Install docker gc + hosts: oo_first_master + gather_facts: false + tasks: + - include_role: + name: openshift_docker_gc diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml index 4b4f19690..62fe0dd60 100644 --- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml +++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml @@ -3,4 +3,4 @@ hosts: oo_first_master roles: - role: openshift_default_storage_class - when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') + when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack') diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 32e5e708a..15ee60dc0 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -1,14 +1,15 @@ --- - name: Hosted Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Hosted install 'In Progress' + run_once: true set_stats: data: - installer_phase_hosted: "In Progress" - aggregate: false + installer_phase_hosted: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - include: create_persistent_volumes.yml @@ -25,13 +26,19 @@ - include: openshift_prometheus.yml when: openshift_hosted_prometheus_deploy | default(False) | bool +- include: install_docker_gc.yml + when: + - openshift_use_crio | default(False) | bool + - openshift_crio_enable_docker_gc | default(False) | bool + - name: Hosted Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Hosted install 'Complete' + run_once: 
true set_stats: data: - installer_phase_hosted: "Complete" - aggregate: false + installer_phase_hosted: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml index 69f50fbcd..bc59bd95a 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/common/openshift-cluster/openshift_logging.yml @@ -1,14 +1,15 @@ --- - name: Logging Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Logging install 'In Progress' + run_once: true set_stats: data: - installer_phase_logging: "In Progress" - aggregate: false + installer_phase_logging: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: OpenShift Aggregated Logging hosts: oo_first_master @@ -24,12 +25,13 @@ tasks_from: update_master_config - name: Logging Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Logging install 'Complete' + run_once: true set_stats: data: - installer_phase_logging: "Complete" - aggregate: false + installer_phase_logging: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml index e369dcd86..80cd93e5f 100644 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -1,14 +1,15 @@ --- - name: Metrics Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Metrics install 'In Progress' + run_once: true set_stats: data: - installer_phase_metrics: "In Progress" - aggregate: false + installer_phase_metrics: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: OpenShift Metrics hosts: oo_first_master @@ -25,12 +26,13 @@ tasks_from: update_master_config.yaml - name: Metrics Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Metrics install 'Complete' + run_once: true set_stats: data: - installer_phase_metrics: "Complete" - aggregate: false + installer_phase_metrics: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml index ac2d250a3..7aa9a16e6 100644 --- a/playbooks/common/openshift-cluster/openshift_prometheus.yml +++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml @@ -1,5 +1,29 @@ --- +- name: Prometheus Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Prometheus install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_prometheus: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + - name: Create Hosted Resources - openshift_prometheus hosts: oo_first_master roles: - role: openshift_prometheus + +- name: Prometheus Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Prometheus install 'Complete' + run_once: true + set_stats: + data: + installer_phase_prometheus: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml 
b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 12cd209d2..eb225dfb5 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -1,11 +1,4 @@ --- -- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2 - hosts: oo_first_master - tasks: - - fail: - msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles." - when: not openshift.common.version_gte_3_2_or_1_2 | bool - - name: Check cert expirys hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config vars: @@ -43,11 +36,6 @@ when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt' - modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: servingInfo.clientCA - yaml_value: ca-bundle.crt - when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca-bundle.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: etcdClientInfo.ca yaml_value: ca-bundle.crt when: @@ -67,6 +55,13 @@ when: - groups.oo_etcd_to_config | default([]) | length == 0 - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt' + # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate. + # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: client-ca-bundle.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt' - name: Copy current OpenShift CA to legacy directory hosts: oo_masters_to_config @@ -114,12 +109,18 @@ register: g_new_openshift_ca_mktemp changed_when: false -- include: ../../openshift-master/ca.yml +- name: Create OpenShift CA + hosts: oo_first_master vars: # Set openshift_ca_config_dir to a temporary directory where CA # will be created. We'll replace the existing CA with the CA # created in the temporary directory. 
openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}" + roles: + - role: openshift_master_facts + - role: openshift_named_certificates + - role: openshift_ca + openshift_ca_host: "{{ groups.oo_first_master.0 }}" - name: Create temp directory for syncing certs hosts: localhost @@ -149,6 +150,7 @@ - ca.key - ca-bundle.crt - ca.serial.txt + - client-ca-bundle.crt delegate_to: "{{ openshift_ca_host }}" run_once: true changed_when: false @@ -167,6 +169,7 @@ - ca.key - ca-bundle.crt - ca.serial.txt + - client-ca-bundle.crt - name: Update master client kubeconfig CA data kubeclient_ca: client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml index afd5463b2..7e9363c5f 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml @@ -70,9 +70,7 @@ --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" --cert={{ openshift.common.config_base }}/master/registry.crt --key={{ openshift.common.config_base }}/master/registry.key - {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %} --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }} - {% endif %} - name: Update registry certificates secret oc_secret: diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml index 95a8f601c..7bb8511f6 100644 --- a/playbooks/common/openshift-cluster/service_catalog.yml +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -1,14 +1,15 @@ --- - name: Service Catalog Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Service Catalog install 'In Progress' + run_once: true set_stats: data: - installer_phase_servicecatalog: "In Progress" - aggregate: false + installer_phase_servicecatalog: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Service Catalog hosts: oo_first_master @@ -20,12 +21,13 @@ first_master: "{{ groups.oo_first_master[0] }}" - name: Service Catalog Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Service Catalog install 'Complete' + run_once: true set_stats: data: - installer_phase_servicecatalog: "Complete" - aggregate: false + installer_phase_servicecatalog: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml deleted file mode 100644 index 090ad6445..000000000 --- a/playbooks/common/openshift-cluster/std_include.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -- name: Initialization Checkpoint Start - hosts: localhost - connection: local - gather_facts: false - roles: - - installer_checkpoint - tasks: - - name: Set install initialization 'In Progress' - set_stats: - data: - installer_phase_initialize: "In Progress" - aggregate: false - -- include: evaluate_groups.yml - tags: - - always - -- include: initialize_facts.yml - tags: - - always - -- include: sanity_checks.yml - tags: - - always - -- include: validate_hostnames.yml - tags: - - 
node - -- include: initialize_openshift_repos.yml - tags: - - always - -- include: initialize_openshift_version.yml - tags: - - always - -- name: Initialization Checkpoint End - hosts: localhost - connection: local - gather_facts: false - tasks: - - name: Set install initialization 'Complete' - set_stats: - data: - installer_phase_initialize: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 98953f72e..6d4ddf011 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -1,5 +1,5 @@ --- -- include: ../../evaluate_groups.yml +- include: ../../../../init/evaluate_groups.yml vars: # Do not allow adding hosts during upgrade. g_new_master_hosts: [] diff --git a/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml new file mode 100644 index 000000000..9c9c260fb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: Role +metadata: + name: shared-resource-viewer + namespace: openshift +rules: +- apiGroups: + - "" + - template.openshift.io + attributeRestrictions: null + resources: + - templates + verbs: + - get + - list + - watch +- apiGroups: + - "" + - image.openshift.io + attributeRestrictions: null + resources: + - imagestreamimages + - imagestreams + - imagestreamtags + verbs: + - get + - list + - watch +- apiGroups: + - "" + - image.openshift.io + attributeRestrictions: null + resources: + - imagestreams/layers + verbs: + - get diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 2826951e6..9981d905b 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -1,15 +1,20 @@ --- -- include: ../evaluate_groups.yml +- include: ../../../init/evaluate_groups.yml vars: # Do not allow adding hosts during upgrade. 
g_new_master_hosts: [] g_new_node_hosts: [] -- include: ../initialize_facts.yml +- include: ../../../init/facts.yml - name: Ensure firewall is not switched during upgrade hosts: oo_all_hosts + vars: + openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}" tasks: + - name: set currently installed version + set_fact: + openshift_currently_installed_version: "{{ openshift_master_installed_version }}" - name: Check if iptables is running command: systemctl status iptables changed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index 72de63070..fc1cbf32a 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -30,6 +30,7 @@ ansible_become: "{{ g_sudo | default(omit) }}" with_items: " {{ groups['oo_nodes_to_config'] }}" when: + - hostvars[item].openshift is defined - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list changed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 07e521a89..c634e0ab8 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -85,6 +85,8 @@ roles: - openshift_manageiq + - role: openshift_project_request_template + when: openshift_project_request_template_manage # Create the new templates shipped in 3.2, existing templates are left # unmodified. This prevents the subsequent role definition for # openshift_examples from failing when trying to replace templates that do @@ -103,14 +105,20 @@ openshift_hosted_templates_import_command: replace # Check for warnings to be printed at the end of the upgrade: -- name: Check for warnings +- name: Clean up and display warnings hosts: oo_masters_to_config - tasks: + tags: + - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + post_tasks: # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond: - name: grep pluginOrderOverride command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml register: grep_plugin_order_override - when: openshift.common.version_gte_3_3_or_1_3 | bool changed_when: false failed_when: false @@ -121,12 +129,8 @@ - not grep_plugin_order_override | skipped - grep_plugin_order_override.rc == 0 -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config - tags: - - always - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + - name: Warn if shared-resource-viewer could not be updated + debug: + msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213" + when: + - __shared_resource_viewer_protected | default(false) diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml 
b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml index 45022cd61..6a5bc24f7 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml @@ -9,16 +9,29 @@ local_facts: ha: "{{ groups.oo_masters_to_config | length > 1 }}" - - name: Ensure HA Master is running - service: - name: "{{ openshift.common.service_type }}-master-api" - state: started - enabled: yes - when: openshift.common.is_containerized | bool + - when: openshift.common.is_containerized | bool + block: + - set_fact: + master_services: + - "{{ openshift.common.service_type }}-master" - - name: Ensure HA Master is running - service: - name: "{{ openshift.common.service_type }}-master-controllers" - state: started - enabled: yes - when: openshift.common.is_containerized | bool + # In case of the non-ha to ha upgrade. + - name: Check if the {{ openshift.common.service_type }}-master-api.service exists + command: > + systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend + register: master_api_service_status + + - set_fact: + master_services: + - "{{ openshift.common.service_type }}-master-api" + - "{{ openshift.common.service_type }}-master-controllers" + when: + - master_api_service_status.stdout_lines | length > 0 + - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0] + + - name: Ensure Master is running + service: + name: "{{ item }}" + state: started + enabled: yes + with_items: "{{ master_services }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml index ad6325ca0..2a8de50a2 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml @@ -1,12 +1,14 @@ --- -- name: Verify Host Requirements +- name: OpenShift Health Checks hosts: oo_all_hosts + any_errors_fatal: true roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: upgrade post_tasks: - - action: openshift_health_check + - name: Run health checks (upgrade) + action: openshift_health_check args: checks: - disk_availability diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 142ce5f3d..13fa37b09 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -4,6 +4,12 @@ msg: Verify OpenShift is already installed when: openshift.common.version is not defined +- name: Update oreg_auth docker login credentials if necessary + include_role: + name: docker + tasks_from: registry_auth.yml + when: oreg_auth_user is defined + - name: Verify containers are available for upgrade command: > docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} @@ -37,7 +43,7 @@ fail: msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required" when: - - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<') + - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<') - name: Fail when openshift version does not meet minium requirement for Origin 
upgrade fail: diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml deleted file mode 100644 index 8cc46ab68..000000000 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# When we update package "a-${version}" and a requires b >= ${version} if we -# don't specify the version of b yum will choose the latest version of b -# available and the whole set of dependencies end up at the latest version. -# Since the package module, unlike the yum module, doesn't flatten a list -# of packages into one transaction we need to do that explicitly. The ansible -# core team tells us not to rely on yum module transaction flattening anyway. - -# TODO: If the sdn package isn't already installed this will install it, we -# should fix that -- name: Upgrade master packages - package: name={{ master_pkgs | join(',') }} state=present - vars: - master_pkgs: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - PyYAML - when: - - component == "master" - - not openshift.common.is_atomic | bool - -- name: Upgrade node packages - package: name={{ node_pkgs | join(',') }} state=present - vars: - node_pkgs: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - PyYAML - when: - - component == "node" - - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index da47491c1..fa65567c2 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,22 +3,6 @@ # Upgrade Masters ############################################################################### -# oc adm migrate storage should be run prior to etcd v3 upgrade -# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 -- name: Pre master upgrade - Upgrade all storage - hosts: oo_first_master - tasks: - - name: Upgrade all storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=* --confirm - register: l_pb_upgrade_control_plane_pre_upgrade_storage - when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - failed_when: - - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 - - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool - # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. 
This causes problems for the etcd data dir fact detection # so we must first make sure this is set correctly before attempting the backup. @@ -31,7 +15,6 @@ role: master local_facts: embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}" - name: Upgrade and backup etcd include: ./etcd/main.yml @@ -49,6 +32,22 @@ - include: create_service_signer_cert.yml +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade all storage + hosts: oo_first_master + tasks: + - name: Upgrade all storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=* --confirm + register: l_pb_upgrade_control_plane_pre_upgrade_storage + when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 + - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool + # Set openshift_master_facts separately. In order to reconcile # admission_config's, we currently must run openshift_master_facts and # then run openshift_facts. @@ -64,13 +63,9 @@ vars: openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 - handlers: - - include: ../../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - - lib_utils - post_tasks: + tasks: + - include_role: + name: openshift_facts # Run the pre-upgrade hook if defined: - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}" @@ -79,52 +74,9 @@ - include: "{{ openshift_master_upgrade_pre_hook }}" when: openshift_master_upgrade_pre_hook is defined - - include: rpm_upgrade.yml component=master - when: not openshift.common.is_containerized | bool - - - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml - - - include: upgrade_scheduler.yml - - - include: "{{ master_config_hook }}" - when: master_config_hook is defined - - - include_vars: ../../../../roles/openshift_master/vars/main.yml - - - name: Remove any legacy systemd units and update systemd units - include: ../../../../roles/openshift_master/tasks/systemd_units.yml - - - name: Check for ca-bundle.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - register: ca_bundle_stat - failed_when: false - - - name: Check for ca.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca.crt" - register: ca_crt_stat - failed_when: false - - - name: Migrate ca.crt to ca-bundle.crt - command: mv ca.crt ca-bundle.crt - args: - chdir: "{{ openshift.common.config_base }}/master" - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Link ca.crt to ca-bundle.crt - file: - src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - path: "{{ openshift.common.config_base }}/master/ca.crt" - state: link - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Update oreg value - yedit: - src: "{{ openshift.common.config_base }}/master/master-config.yaml" - key: 'imageConfig.format' - value: "{{ oreg_url | default(oreg_url_master) }}" - when: oreg_url is defined or oreg_url_master is defined + - include_role: + name: openshift_master + tasks_from: upgrade.yml 
# Run the upgrade hook prior to restarting services/system if defined: - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" @@ -151,7 +103,9 @@ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=clusterpolicies --confirm register: l_pb_upgrade_control_plane_post_upgrade_storage - when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool + when: + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool + - openshift_version | version_compare('3.7','<') failed_when: - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 @@ -193,13 +147,14 @@ # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe # restart. skip_docker_role: True + __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml" tasks: - name: Reconcile Cluster Roles command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --additive-only=true --confirm -o name register: reconcile_cluster_role_result - when: not openshift.common.version_gte_3_7 | bool + when: openshift_version | version_compare('3.7','<') changed_when: - reconcile_cluster_role_result.stdout != '' - reconcile_cluster_role_result.rc == 0 @@ -214,7 +169,7 @@ --exclude-groups=system:unauthenticated --exclude-users=system:anonymous --additive-only=true --confirm -o name - when: not openshift.common.version_gte_3_7 | bool + when: openshift_version | version_compare('3.7','<') register: reconcile_bindings_result changed_when: - reconcile_bindings_result.stdout != '' @@ -229,7 +184,51 @@ changed_when: - reconcile_jenkins_role_binding_result.stdout != '' - reconcile_jenkins_role_binding_result.rc == 0 - when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool) + when: + - openshift_version | version_compare('3.7','<') + + - when: openshift_upgrade_target | version_compare('3.7','<') + block: + - name: Retrieve shared-resource-viewer + oc_obj: + state: list + kind: role + name: "shared-resource-viewer" + namespace: "openshift" + register: objout + + - name: Determine if shared-resource-viewer is protected + set_fact: + __shared_resource_viewer_protected: true + when: + - "'results' in objout" + - "'results' in objout['results']" + - "'annotations' in objout['results']['results'][0]['metadata']" + - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']" + - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'" + - copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + with_items: + - "{{ __master_shared_resource_viewer_file }}" + when: __shared_resource_viewer_protected is not defined + + - name: Fixup shared-resource-viewer role + oc_obj: + state: present + kind: role + name: "shared-resource-viewer" + namespace: "openshift" + files: + - "/tmp/{{ __master_shared_resource_viewer_file }}" + delete_after: true + when: __shared_resource_viewer_protected is not defined + register: result + retries: 3 + delay: 5 + until: result.rc == 0 + ignore_errors: true + - name: Reconcile Security Context Constraints command: > @@ -318,13 +317,13 @@ delay: 60 roles: - - lib_openshift - openshift_facts - - docker - - openshift_node_dnsmasq - - openshift_node_upgrade - post_tasks: + - 
include_role: + name: openshift_node + tasks_from: upgrade.yml + vars: + openshift_node_upgrade_in_progress: True - name: Set node schedulability oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index c93a5d89c..5dc8193a7 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -34,16 +34,18 @@ delay: 60 roles: - - lib_openshift - openshift_facts - - docker - - openshift_node_dnsmasq - - openshift_node_upgrade - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - post_tasks: + - include_role: + name: openshift_node + tasks_from: upgrade.yml + vars: + openshift_node_upgrade_in_progress: True + - include_role: + name: openshift_excluder + vars: + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Set node schedulability oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml deleted file mode 100644 index 8558bf3e9..000000000 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ /dev/null @@ -1,173 +0,0 @@ ---- -# Upgrade predicates -- vars: - prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" - prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}" - default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}" - # older_predicates are the set of predicates that have previously been - # hard-coded into openshift_facts - older_predicates: - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: MaxEBSVolumeCount - - name: MaxGCEPDVolumeCount - - name: Region - argument: - serviceAffinity: - labels: - - region - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: Region - argument: - serviceAffinity: - labels: - - region - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: Region - argument: - serviceAffinity: - labels: - - region - # older_predicates_no_region are the set of predicates that have previously - # been hard-coded into openshift_facts, with the Region predicate removed - older_predicates_no_region: - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: MaxEBSVolumeCount - - name: MaxGCEPDVolumeCount - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - block: - - # Handle case where openshift_master_predicates is defined - - block: - - debug: - msg: "WARNING: openshift_master_scheduler_predicates is set to 
defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}" - when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] - - - debug: - msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" - when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates - when: openshift_master_scheduler_predicates | default(none) is not none - - # Handle cases where openshift_master_predicates is not defined - - block: - - debug: - msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}" - when: - - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates - - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] - - - set_fact: - openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}" - when: - - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates - - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] - - - set_fact: - openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}" - when: - - openshift_master_scheduler_current_predicates != default_predicates_no_region - - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] - - when: openshift_master_scheduler_predicates | default(none) is none - - -# Upgrade priorities -- vars: - prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" - prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}" - default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}" - # older_priorities are the set of priorities that have previously been - # hard-coded into openshift_facts - older_priorities: - - - name: LeastRequestedPriority - weight: 1 - - name: SelectorSpreadPriority - weight: 1 - - name: Zone - weight: 2 - argument: - serviceAntiAffinity: - label: zone - # older_priorities_no_region are the set of priorities that have previously - # been hard-coded into openshift_facts, with the Zone priority removed - older_priorities_no_zone: - - - name: LeastRequestedPriority - weight: 1 - - name: SelectorSpreadPriority - weight: 1 - block: - - # Handle case where openshift_master_priorities is defined - - block: - - debug: - msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}" - when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] - - - debug: - msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" - when: 
openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities - when: openshift_master_scheduler_priorities | default(none) is not none - - # Handle cases where openshift_master_priorities is not defined - - block: - - debug: - msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}" - when: - - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities - - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] - - - set_fact: - openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}" - when: - - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities - - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] - - - set_fact: - openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}" - when: - - openshift_master_scheduler_current_priorities != default_priorities_no_zone - - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] - - when: openshift_master_scheduler_priorities | default(none) is none - - -# Update scheduler -- vars: - scheduler_config: - kind: Policy - apiVersion: v1 - predicates: "{{ openshift_upgrade_scheduler_predicates - | default(openshift_master_scheduler_current_predicates) }}" - priorities: "{{ openshift_upgrade_scheduler_priorities - | default(openshift_master_scheduler_current_priorities) }}" - block: - - name: Update scheduler config - copy: - content: "{{ scheduler_config | to_nice_json }}" - dest: "{{ openshift_master_scheduler_conf }}" - backup: true - when: > - openshift_upgrade_scheduler_predicates is defined or - openshift_upgrade_scheduler_priorities is defined diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml deleted file mode 100644 index d69472fad..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst' - yaml_value: 400 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps' - yaml_value: 200 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - 
yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst' - yaml_value: 600 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps' - yaml_value: 300 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.servicesServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile' - yaml_value: service-signer.key - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml deleted file mode 100644 index 89b524f14..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.burst' - yaml_value: 40 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.qps' - yaml_value: 20 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml deleted file mode 100644 index a241ef039..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml +++ /dev/null @@ -1,118 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml deleted file mode 100644 index f64f0e003..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ /dev/null @@ -1,119 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml deleted file mode 100644 index cee4e9087..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ /dev/null @@ -1,113 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. 
At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml deleted file mode 100644 index ed89dbe8d..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml deleted file mode 100644 index ae217ba2e..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml +++ /dev/null @@ -1,116 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../upgrade_nodes.yml - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml deleted file mode 100644 index 43da5b629..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ /dev/null @@ -1,119 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml deleted file mode 100644 index 8531e6045..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ /dev/null @@ -1,111 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. 
At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml deleted file mode 100644 index ed89dbe8d..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml deleted file mode 100644 index ae63c9ca9..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -############################################################################### -# Pre upgrade checks for known data problems, if this playbook fails you should -# contact support. If you're not supported contact users@lists.openshift.com -# -# oc_objectvalidator provides these two checks -# 1 - SDN Data issues, never seen in the wild but known possible due to code audits -# https://github.com/openshift/origin/issues/12697 -# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934 -# -############################################################################### -- name: Verify 3.5 specific upgrade checks - hosts: oo_first_master - roles: - - { role: lib_openshift } - tasks: - - name: Check for invalid namespaces and SDN errors - oc_objectvalidator: - - # What's all this PetSet business about? - # - # 'PetSets' were ALPHA resources in Kube <= 3.4. In >= 3.5 they are - # no longer supported. The BETA resource 'StatefulSets' replaces - # them. We can't migrate clients PetSets to - # StatefulSets. 
Additionally, Red Hat has never officially supported - # these resource types. Sorry users, but if you were using - # unsupported resources from the Kube documentation then we can't - # help you at this time. - # - # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229 - - name: Check if legacy PetSets exist - oc_obj: - state: list - all_namespaces: true - kind: petsets - register: l_do_petsets_exist - - - name: Fail on unsupported resource migration 'PetSets' - fail: - msg: > - PetSet objects were detected in your cluster. These are an - Alpha feature in upstream Kubernetes 1.4 and are not supported - by Red Hat. In Kubernetes 1.5, they are replaced by the Beta - feature StatefulSets. Red Hat currently does not offer support - for either PetSets or StatefulSets. - - Automatically migrating PetSets to StatefulSets in OpenShift - Container Platform (OCP) 3.5 is not supported. See the - Kubernetes "Upgrading from PetSets to StatefulSets" - documentation for additional information: - - https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/ - - PetSets MUST be removed before upgrading to OCP 3.5. Red Hat - strongly recommends reading the above referenced documentation - in its entirety before taking any destructive actions. - - If you want to simply remove all PetSets without manually - migrating to StatefulSets, run this command as a user with - cluster-admin privileges: - - $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false - when: - # Search did not fail, valid resource type found - - l_do_petsets_exist.results.returncode == 0 - # Items do exist in the search results - - l_do_petsets_exist.results.results.0['items'] | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml deleted file mode 100644 index ed89dbe8d..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 920dc2ffc..ef52f214b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -63,7 +63,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -116,6 +116,8 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_6/master_config_upgrade.yml" - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 27d8515dc..4c6646a38 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ 
b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -67,7 +67,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -76,7 +76,6 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index ba6fcc3f8..f25cfe0d0 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -56,7 +56,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml deleted file mode 100644 index ed89dbe8d..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index f1ca1edb9..e3c012380 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -67,7 +67,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -120,6 +120,22 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index 6c4f9671b..a88fa7b2e 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -71,7 +71,7 
@@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -80,7 +80,6 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger @@ -128,4 +127,18 @@ vars: master_config_hook: "v3_7/master_config_upgrade.yml" +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started + - include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index bc080f9a3..c0546bd2d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -56,7 +56,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index f76fc68d1..74d0cd8ad 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -11,13 +11,15 @@ tasks: - name: Check for invalid namespaces and SDN errors oc_objectvalidator: - + # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO - name: Confirm OpenShift authorization objects are in sync command: > {{ openshift.common.client_binary }} adm migrate authorization - when: not openshift.common.version_gte_3_7 | bool + when: + - openshift_currently_installed_version | version_compare('3.7','<') + - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool changed_when: false register: l_oc_result until: l_oc_result.rc == 0 - retries: 4 + retries: 2 delay: 15 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins index 7de3c1dd7..7de3c1dd7 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml new file mode 100644 index 000000000..1d4d1919c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml @@ -0,0 +1,20 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.election.lockName' + yaml_value: 'openshift-master-controllers' + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 
'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key + +- modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_8/roles index 415645be6..415645be6 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index 30e719d8f..73df15d53 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -12,8 +12,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade @@ -21,6 +21,10 @@ tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos and initialize facts on all hosts hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config tags: @@ -47,6 +51,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade @@ -59,7 +67,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -112,6 +120,22 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index e9cec9220..48d55c16f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -21,14 +21,18 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos on control plane hosts hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tags: @@ -55,6 +59,10 @@ tags: - pre_upgrade 
+- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade @@ -63,7 +71,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -72,7 +80,6 @@ # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - openshift_install_base_package_group: "oo_masters_to_config" # We skip the docker role at this point in upgrade to prevent # unintended package, container, or config upgrades which trigger @@ -118,6 +125,20 @@ - include: ../upgrade_control_plane.yml vars: - master_config_hook: "v3_5/master_config_upgrade.yml" + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index e29d0f8e6..abd56e762 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -14,8 +14,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml @@ -48,11 +48,15 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../disable_node_excluders.yml tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml new file mode 100644 index 000000000..d8540abfb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.8 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - debug: msg="noop" diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml index 31a0f50d8..eb6b94f33 100644 --- a/playbooks/common/openshift-etcd/certificates.yml +++ b/playbooks/common/openshift-etcd/certificates.yml @@ -1,29 +1,4 @@ --- -- name: Create etcd server certificates for etcd hosts - hosts: oo_etcd_to_config - any_errors_fatal: true - roles: - - role: openshift_etcd_facts - post_tasks: - - include_role: - name: etcd - tasks_from: server_certificates - vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - 
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" +- include: server_certificates.yml -- name: Create etcd client certificates for master hosts - hosts: oo_masters_to_config - any_errors_fatal: true - roles: - - role: openshift_etcd_facts - - role: openshift_etcd_client_certificates - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" - etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" - etcd_cert_prefix: "master.etcd-" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config +- include: master_etcd_certificates.yml diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 2cae231b4..3fe483785 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -1,14 +1,19 @@ --- - name: etcd Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set etcd install 'In Progress' + run_once: true set_stats: data: - installer_phase_etcd: "In Progress" - aggregate: false + installer_phase_etcd: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- include: ca.yml + +- include: certificates.yml - name: Configure etcd hosts: oo_etcd_to_config @@ -23,12 +28,13 @@ - role: nickhammond.logrotate - name: etcd Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set etcd install 'Complete' + run_once: true set_stats: data: - installer_phase_etcd: "Complete" - aggregate: false + installer_phase_etcd: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml new file mode 100644 index 000000000..b16b78c4f --- /dev/null +++ b/playbooks/common/openshift-etcd/embedded2external.yml @@ -0,0 +1,172 @@ +--- +- name: Pre-migrate checks + hosts: localhost + tasks: + # Check there is only one etcd host + - assert: + that: groups.oo_etcd_to_config | default([]) | length == 1 + msg: "[etcd] group must contain only one host" + # Check there is only one master + - assert: + that: groups.oo_masters_to_config | default([]) | length == 1 + msg: "[master] group must contain only one host" + +# 1. stop a master +- name: Prepare masters for etcd data migration + hosts: oo_first_master + roles: + - role: openshift_facts + tasks: + - name: Check the master API is ready + include_role: + name: openshift_master + tasks_from: check_master_api_is_ready + - set_fact: + master_service: "{{ openshift.common.service_type + '-master' }}" + embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + - debug: + msg: "master service name: {{ master_service }}" + - name: Stop master + service: + name: "{{ master_service }}" + state: stopped + # 2. 
backup embedded etcd + # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285 + - include_role: + name: etcd + tasks_from: backup + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_embedded_etcd: "{{ true }}" + r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}" + + - include_role: + name: etcd + tasks_from: backup.archive + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_embedded_etcd: "{{ true }}" + r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}" + +# 3. deploy certificates (for etcd and master) +- include: ca.yml + +- include: server_certificates.yml + +- name: Backup etcd client certificates for master host + hosts: oo_first_master + tasks: + - include_role: + name: etcd + tasks_from: backup_master_etcd_certificates + +- name: Redeploy master etcd certificates + include: master_etcd_certificates.yml + vars: + etcd_certificates_redeploy: "{{ true }}" + +# 4. deploy external etcd +- include: ../openshift-etcd/config.yml + +# 5. stop external etcd +- name: Cleanse etcd + hosts: oo_etcd_to_config[0] + gather_facts: no + pre_tasks: + - include_role: + name: etcd + tasks_from: disable_etcd + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + - include_role: + name: etcd + tasks_from: clean_data + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + +# 6. copy the embedded etcd backup to the external host +# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory +- name: Copy embedded etcd backup to the external host + hosts: localhost + tasks: + - name: Create local temp directory for syncing etcd backup + local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX + register: g_etcd_client_mktemp + changed_when: False + become: no + + - include_role: + name: etcd + tasks_from: backup.fetch + vars: + r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}" + etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_embedded_etcd: "{{ true }}" + r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" + delegate_to: "{{ groups.oo_first_master[0] }}" + + - include_role: + name: etcd + tasks_from: backup.copy + vars: + r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}" + etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" + delegate_to: "{{ groups.oo_etcd_to_config[0] }}" + + - debug: + msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}" + + - name: Delete temporary directory + local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent + changed_when: False + become: no + +# 7. 
force new cluster from the backup +- name: Force new etcd cluster + hosts: oo_etcd_to_config[0] + tasks: + - include_role: + name: etcd + tasks_from: backup.unarchive + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" + + - include_role: + name: etcd + tasks_from: backup.force_new_cluster + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migrate + r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" + etcd_peer: "{{ openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + +# 8. re-configure master to use the external etcd +- name: Configure master to use external etcd + hosts: oo_first_master + tasks: + - include_role: + name: openshift_master + tasks_from: configure_external_etcd + vars: + etcd_peer_url_scheme: "https" + etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}" + etcd_peer_port: 2379 + + # 9. start the master + - name: Start master + service: + name: "{{ master_service }}" + state: started + register: service_status + until: service_status.state is defined and service_status.state == "started" + retries: 5 + delay: 10 diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml new file mode 100644 index 000000000..0a25aac57 --- /dev/null +++ b/playbooks/common/openshift-etcd/master_etcd_certificates.yml @@ -0,0 +1,14 @@ +--- +- name: Create etcd client certificates for master hosts + hosts: oo_masters_to_config + any_errors_fatal: true + roles: + - role: openshift_etcd_facts + - role: openshift_etcd_client_certificates + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" + etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" + etcd_cert_prefix: "master.etcd-" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml index 2456ad3a8..31362f2f6 100644 --- a/playbooks/common/openshift-etcd/migrate.yml +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -1,4 +1,17 @@ --- +- name: Check if the master has embedded etcd + hosts: localhost + connection: local + become: no + gather_facts: no + tags: + - always + tasks: + - fail: + msg: "Migration of an embedded etcd is not supported. Please, migrate the embedded etcd into an external etcd first." 
+ when: + - groups.oo_etcd_to_config | default([]) | length == 0 + - name: Run pre-checks hosts: oo_etcd_to_migrate tasks: @@ -60,12 +73,11 @@ hosts: oo_etcd_to_migrate gather_facts: no pre_tasks: - - set_fact: - l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}" - - name: Disable etcd members - service: - name: "{{ l_etcd_service }}" - state: stopped + - include_role: + name: etcd + tasks_from: disable_etcd + vars: + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - name: Migrate data on first etcd hosts: oo_etcd_to_migrate[0] diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml index b5ba2bbba..20061366c 100644 --- a/playbooks/common/openshift-etcd/scaleup.yml +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -46,7 +46,7 @@ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_initial_cluster_state: "existing" - initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}" + etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}" etcd_ca_setup: False r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - role: nickhammond.logrotate @@ -71,7 +71,7 @@ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'])) + | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) )) | oo_collect('openshift.common.hostname') | default(none, true) }}" openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml new file mode 100644 index 000000000..10e06747b --- /dev/null +++ b/playbooks/common/openshift-etcd/server_certificates.yml @@ -0,0 +1,15 @@ +--- +- name: Create etcd server certificates for etcd hosts + hosts: oo_etcd_to_config + any_errors_fatal: true + roles: + - role: openshift_etcd_facts + post_tasks: + - include_role: + name: etcd + tasks_from: server_certificates + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 516618de2..19e14ab3e 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -1,14 +1,15 @@ --- - name: GlusterFS Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set GlusterFS install 'In Progress' + run_once: true set_stats: data: - installer_phase_glusterfs: "In Progress" - aggregate: false + installer_phase_glusterfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Open firewall ports for GlusterFS nodes hosts: glusterfs @@ -18,6 +19,11 @@ tasks_from: firewall.yml when: - openshift_storage_glusterfs_is_native | 
default(True) | bool + - include_role: + name: openshift_storage_glusterfs + tasks_from: kernel_modules.yml + when: + - openshift_storage_glusterfs_is_native | default(True) | bool - name: Open firewall ports for GlusterFS registry nodes hosts: glusterfs_registry @@ -27,6 +33,11 @@ tasks_from: firewall.yml when: - openshift_storage_glusterfs_registry_is_native | default(True) | bool + - include_role: + name: openshift_storage_glusterfs + tasks_from: kernel_modules.yml + when: + - openshift_storage_glusterfs_registry_is_native | default(True) | bool - name: Configure GlusterFS hosts: oo_first_master @@ -37,12 +48,13 @@ when: groups.oo_glusterfs_to_config | default([]) | count > 0 - name: GlusterFS Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set GlusterFS install 'Complete' + run_once: true set_stats: data: - installer_phase_glusterfs: "Complete" - aggregate: false + installer_phase_glusterfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml index ecbb092bc..d737b836b 100644 --- a/playbooks/common/openshift-loadbalancer/config.yml +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -1,14 +1,24 @@ --- - name: Load Balancer Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set load balancer install 'In Progress' + run_once: true set_stats: data: - installer_phase_loadbalancer: "In Progress" - aggregate: false + installer_phase_loadbalancer: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: Configure firewall and docker for load balancers + hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config + vars: + openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" + roles: + - role: os_firewall + - role: openshift_docker + when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool - name: Configure load balancers hosts: oo_lb_to_config @@ -25,16 +35,17 @@ + openshift_loadbalancer_additional_backends | default([]) }}" openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" roles: - - role: os_firewall - role: openshift_loadbalancer + - role: tuned - name: Load Balancer Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set load balancer install 'Complete' + run_once: true set_stats: data: - installer_phase_loadbalancer: "Complete" - aggregate: false + installer_phase_loadbalancer: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml new file mode 100644 index 000000000..facb3a5b9 --- /dev/null +++ b/playbooks/common/openshift-management/add_container_provider.yml @@ -0,0 +1,8 @@ +--- +- name: Add Container Provider to Management + hosts: oo_first_master + tasks: + - name: Run the Management Integration Tasks + include_role: + name: openshift_management + tasks_from: add_container_provider diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml new file mode 100644 index 000000000..3f1cdf713 --- /dev/null +++ b/playbooks/common/openshift-management/config.yml @@ -0,0 +1,39 @@ +--- +- name: 
Management Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Management install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_management: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: Setup CFME + hosts: oo_first_master + pre_tasks: + - name: Create a temporary place to evaluate the PV templates + command: mktemp -d /tmp/openshift-ansible-XXXXXXX + register: r_openshift_management_mktemp + changed_when: false + + tasks: + - name: Run the CFME Setup Role + include_role: + name: openshift_management + vars: + template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}" + +- name: Management Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Management install 'Complete' + run_once: true + set_stats: + data: + installer_phase_management: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-management/filter_plugins index 99a95e4ca..99a95e4ca 120000 --- a/playbooks/common/openshift-cfme/filter_plugins +++ b/playbooks/common/openshift-management/filter_plugins diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-management/library index ba40d2f56..ba40d2f56 120000 --- a/playbooks/common/openshift-cfme/library +++ b/playbooks/common/openshift-management/library diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-management/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/common/openshift-cfme/roles +++ b/playbooks/common/openshift-management/roles diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml index 78b8e7668..9f35cc276 100644 --- a/playbooks/common/openshift-cfme/uninstall.yml +++ b/playbooks/common/openshift-management/uninstall.yml @@ -1,8 +1,8 @@ --- - name: Uninstall CFME - hosts: masters + hosts: masters[0] tasks: - name: Run the CFME Uninstall Role Tasks include_role: - name: openshift_cfme + name: openshift_management tasks_from: uninstall diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml index ee76e2ed7..32f638d42 100644 --- a/playbooks/common/openshift-master/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml @@ -1,14 +1,15 @@ --- - name: Master Additional Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Master Additional install 'In Progress' + run_once: true set_stats: data: - installer_phase_master_additional: "In Progress" - aggregate: false + installer_phase_master_additional: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Additional master configuration hosts: oo_first_master @@ -20,16 +21,18 @@ roles: - role: openshift_master_cluster when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" + - role: openshift_project_request_template + when: openshift_project_request_template_manage - role: openshift_examples when: openshift_install_examples | default(true, true) | bool registry_url: "{{ openshift.master.registry_url }}" - role: openshift_hosted_templates registry_url: "{{ openshift.master.registry_url }}" - role: openshift_manageiq - when: openshift_use_manageiq | default(false) | bool + when: openshift_use_manageiq | default(true) | bool - role: 
cockpit when: - - openshift.common.is_atomic + - not openshift.common.is_atomic | bool - deployment_type == 'openshift-enterprise' - osm_use_cockpit is undefined or osm_use_cockpit | bool - openshift.common.deployment_subtype != 'registry' @@ -37,12 +40,13 @@ when: openshift_use_flannel | default(false) | bool - name: Master Additional Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Master Additional install 'Complete' + run_once: true set_stats: data: - installer_phase_master_additional: "Complete" - aggregate: false + installer_phase_master_additional: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-master/ca.yml b/playbooks/common/openshift-master/ca.yml deleted file mode 100644 index 5bb796fa3..000000000 --- a/playbooks/common/openshift-master/ca.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Create OpenShift CA - hosts: oo_masters_to_config - roles: - - role: openshift_master_facts - - role: openshift_named_certificates - - role: openshift_ca - openshift_ca_host: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 766e0e501..6b0fd6b7c 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -1,14 +1,17 @@ --- - name: Master Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Master install 'In Progress' + run_once: true set_stats: data: - installer_phase_master: "In Progress" - aggregate: false + installer_phase_master: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- include: certificates.yml - name: Disable excluders hosts: oo_masters_to_config @@ -196,6 +199,7 @@ openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" + - role: tuned - role: nuage_ca when: openshift_use_nuage | default(false) | bool - role: nuage_common @@ -204,6 +208,18 @@ when: openshift_use_nuage | default(false) | bool - role: calico_master when: openshift_use_calico | default(false) | bool + tasks: + - include_role: + name: kuryr + tasks_from: master + when: openshift_use_kuryr | default(false) | bool + + - name: Setup the node group config maps + include_role: + name: openshift_node_group + when: openshift_master_bootstrap_enabled | default(false) | bool + run_once: True + post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} @@ -224,12 +240,13 @@ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Master Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Master install 'Complete' + run_once: true set_stats: data: - installer_phase_master: "Complete" - aggregate: false + installer_phase_master: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js deleted file mode 100644 index 
d0a9f11dc..000000000 --- a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js +++ /dev/null @@ -1,2 +0,0 @@ -// empty file so that the master-config can still point to a file that exists -// this file will be replaced by the template service broker role if enabled diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml index 4f8b758fd..4e1b3a3be 100644 --- a/playbooks/common/openshift-master/restart_services.yml +++ b/playbooks/common/openshift-master/restart_services.yml @@ -1,22 +1,4 @@ --- -- name: Restart master API - service: - name: "{{ openshift.common.service_type }}-master-api" - state: restarted - when: openshift_master_ha | bool -- name: Wait for master API to come back online - wait_for: - host: "{{ openshift.common.hostname }}" - state: started - delay: 10 - port: "{{ openshift.master.api_port }}" - timeout: 600 - when: openshift_master_ha | bool -- name: Restart master controllers - service: - name: "{{ openshift.common.service_type }}-master-controllers" - state: restarted - # Ignore errrors since it is possible that type != simple for - # pre-3.1.1 installations. - ignore_errors: true - when: openshift_master_ha | bool +- include_role: + name: openshift_master + tasks_from: restart.yml diff --git a/playbooks/common/openshift-master/revert-client-ca.yml b/playbooks/common/openshift-master/revert-client-ca.yml new file mode 100644 index 000000000..9ae23bf5b --- /dev/null +++ b/playbooks/common/openshift-master/revert-client-ca.yml @@ -0,0 +1,17 @@ +--- +- name: Set servingInfo.clientCA = ca.crt in master config + hosts: oo_masters_to_config + tasks: + - name: Read master config + slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_output + + # servingInfo.clientCA may be set as the client-ca-bundle.crt from + # CA redeployment and this task reverts that change. + - name: Set servingInfo.clientCA = ca.crt in master config + modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index d007fac85..4c415ebce 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -22,16 +22,17 @@ - name: restart master api service: name={{ openshift.common.service_type }}-master-controllers state=restarted notify: verify api server + # We retry the controllers because the API may not be 100% initialized yet. 
- name: restart master controllers - service: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 - name: verify api server command: > curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} {{ openshift.master.api_url }}/healthz/ready args: # Disables the following warning: @@ -47,8 +48,6 @@ - include: ../openshift-etcd/certificates.yml -- include: ../openshift-master/certificates.yml - - include: ../openshift-master/config.yml - include: ../openshift-loadbalancer/config.yml diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml index 560eea785..97acc5d5d 100644 --- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml +++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml @@ -136,9 +136,15 @@ when: - not front_proxy_kubeconfig.stat.exists -- name: copy tech preview extension file for service console UI - copy: - src: openshift-ansible-catalog-console.js +- name: Delete temp directory + file: + name: "{{ certtemp.stdout }}" + state: absent + changed_when: False + +- name: Setup extension file for service console UI + template: + src: ../templates/openshift-ansible-catalog-console.js dest: /etc/origin/master/openshift-ansible-catalog-console.js - name: Update master config @@ -179,8 +185,13 @@ - yedit_output.changed - openshift.master.cluster_method == 'native' +# We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 when: - yedit_output.changed - openshift.master.cluster_method == 'native' @@ -190,11 +201,7 @@ # wait_for port doesn't provide health information. 
command: > curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} {{ openshift.master.api_url }}/healthz/ready args: # Disables the following warning: @@ -207,9 +214,3 @@ changed_when: false when: - yedit_output.changed - -- name: Delete temp directory - file: - name: "{{ certtemp.stdout }}" - state: absent - changed_when: False diff --git a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js new file mode 100644 index 000000000..fd02325ba --- /dev/null +++ b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js @@ -0,0 +1 @@ +window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }}; diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml index 66303d6f7..6ea77e00b 100644 --- a/playbooks/common/openshift-nfs/config.yml +++ b/playbooks/common/openshift-nfs/config.yml @@ -1,14 +1,15 @@ --- - name: NFS Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set NFS install 'In Progress' + run_once: true set_stats: data: - installer_phase_nfs: "In Progress" - aggregate: false + installer_phase_nfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" - name: Configure nfs hosts: oo_nfs_to_config @@ -17,12 +18,13 @@ - role: openshift_storage_nfs - name: NFS Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set NFS install 'Complete' + run_once: true set_stats: data: - installer_phase_nfs: "Complete" - aggregate: false + installer_phase_nfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml index fe51ef833..ac757397b 100644 --- a/playbooks/common/openshift-node/additional_config.yml +++ b/playbooks/common/openshift-node/additional_config.yml @@ -19,10 +19,14 @@ - group_by: key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }} changed_when: False + # Create group for kuryr nodes + - group_by: + key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }} + changed_when: False - include: etcd_client_config.yml vars: - openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv" + openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr" - name: Additional node config hosts: oo_nodes_use_flannel @@ -50,3 +54,11 @@ - role: contiv contiv_role: netplugin when: openshift_use_contiv | default(false) | bool + +- name: Configure Kuryr node + hosts: oo_nodes_use_kuryr + tasks: + - include_role: + name: kuryr + tasks_from: node + when: openshift_use_kuryr | default(false) | bool diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml new file mode 100644 index 000000000..38753d0af --- /dev/null +++ b/playbooks/common/openshift-node/clean_image.yml @@ -0,0 +1,10 @@ +--- +- name: Configure nodes + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + tasks: + - name: Remove 
any ansible facts created during AMI creation + file: + path: "/etc/ansible/facts.d/{{ item }}" + state: absent + with_items: + - openshift.fact diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 6fd8aa6f1..28e3c1b1b 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -1,14 +1,17 @@ --- - name: Node Install Checkpoint Start - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Node install 'In Progress' + run_once: true set_stats: data: - installer_phase_node: "In Progress" - aggregate: false + installer_phase_node: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- include: certificates.yml - include: setup.yml @@ -23,12 +26,13 @@ - include: enable_excluders.yml - name: Node Install Checkpoint End - hosts: localhost - connection: local + hosts: all gather_facts: false tasks: - name: Set Node install 'Complete' + run_once: true set_stats: data: - installer_phase_node: "Complete" - aggregate: false + installer_phase_node: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml index c96e4921c..17259422d 100644 --- a/playbooks/common/openshift-node/configure_nodes.yml +++ b/playbooks/common/openshift-node/configure_nodes.yml @@ -13,4 +13,5 @@ roles: - role: os_firewall - role: openshift_node + - role: tuned - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml new file mode 100644 index 000000000..3c042acdc --- /dev/null +++ b/playbooks/common/openshift-node/image_prep.yml @@ -0,0 +1,21 @@ +--- +- name: normalize groups + include: ../../init/evaluate_groups.yml + +- name: initialize the facts + include: ../../init/facts.yml + +- name: initialize the repositories + include: ../../init/repos.yml + +- name: run node config setup + include: setup.yml + +- name: run node config + include: configure_nodes.yml + +- name: Re-enable excluders + include: enable_excluders.yml + +- name: Remove any undesired artifacts from build + include: clean_image.yml diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml index b3a7399dc..c2efb0483 100644 --- a/playbooks/common/openshift-node/network_manager.yml +++ b/playbooks/common/openshift-node/network_manager.yml @@ -1,5 +1,5 @@ --- -- include: ../openshift-cluster/evaluate_groups.yml +- include: ../../init/evaluate_groups.yml - name: Install and configure NetworkManager hosts: oo_all_hosts diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml index a3d1d46a6..097717607 100644 --- a/playbooks/gcp/openshift-cluster/provision.yml +++ b/playbooks/gcp/openshift-cluster/provision.yml @@ -9,11 +9,8 @@ include_role: name: openshift_gcp -- name: normalize groups - include: ../../byo/openshift-cluster/initialize_groups.yml - -- name: run the std_include - include: ../../common/openshift-cluster/std_include.yml +- name: run the init + include: ../../init/main.yml - name: run the config include: ../../common/openshift-cluster/config.yml diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml index e55b2f964..8787c87e1 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ 
b/playbooks/init/evaluate_groups.yml @@ -5,6 +5,9 @@ become: no gather_facts: no tasks: + - name: Load group name mapping variables + include_vars: vars/cluster_hosts.yml + - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required fail: msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set @@ -51,7 +54,7 @@ when: - g_etcd_hosts | default([]) | length not in [3,1] - not openshift_master_unsupported_embedded_etcd | default(False) - - not openshift_node_bootstrap | default(False) + - not (openshift_node_bootstrap | default(False)) - name: Evaluate oo_all_hosts add_host: diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/init/facts.yml index be2f8b5f4..91223d368 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/init/facts.yml @@ -10,6 +10,7 @@ - name: load openshift_facts module include_role: name: openshift_facts + static: yes # TODO: Should this role be refactored into health_checks?? - name: Run openshift_sanitize_inventory to set variables @@ -145,7 +146,19 @@ https_proxy: "{{ openshift_https_proxy | default(None) }}" no_proxy: "{{ openshift_no_proxy | default(None) }}" generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" - no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}" + + - name: Set fact of no_proxy_internal_hostnames + openshift_facts: + role: common + local_facts: + no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - name: initialize_facts set_fact repoquery command set_fact: diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml new file mode 100644 index 000000000..87ffeafc7 --- /dev/null +++ b/playbooks/init/main.yml @@ -0,0 +1,38 @@ +--- +- name: Initialization Checkpoint Start + hosts: all + gather_facts: false + roles: + - installer_checkpoint + tasks: + - name: Set install initialization 'In Progress' + run_once: true + set_stats: + data: + installer_phase_initialize: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- include: evaluate_groups.yml + +- include: facts.yml + +- include: sanity_checks.yml + +- include: validate_hostnames.yml + +- include: repos.yml + +- include: version.yml + +- name: Initialization Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set install initialization 'Complete' + run_once: true + set_stats: + data: + installer_phase_initialize: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/init/repos.yml index a7114fc80..a7114fc80 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml +++ b/playbooks/init/repos.yml diff --git a/playbooks/init/roles b/playbooks/init/roles new file mode 120000 index 000000000..b741aa3db --- /dev/null +++ b/playbooks/init/roles @@ -0,0 +1 @@ +../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/init/sanity_checks.yml index 26716a92d..26716a92d 100644 --- a/playbooks/common/openshift-cluster/sanity_checks.yml +++ b/playbooks/init/sanity_checks.yml diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml index be2e6a15a..be2e6a15a 100644 --- a/playbooks/common/openshift-cluster/validate_hostnames.yml +++ b/playbooks/init/validate_hostnames.yml diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/init/vars/cluster_hosts.yml index e807ac004..e807ac004 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/init/vars/cluster_hosts.yml diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/init/version.yml index 6100c36e1..37a5284d5 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/init/version.yml @@ -1,15 +1,4 @@ --- -# openshift_install_base_package_group may be set in a play variable to limit -# the host groups the base package is installed on. This is currently used -# for master/control-plane upgrades. -- name: Set version_install_base_package true on masters and nodes - hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}" - tasks: - - name: Set version_install_base_package true - set_fact: - version_install_base_package: True - when: version_install_base_package is not defined - # NOTE: requires openshift_facts be run - name: Determine openshift_version to configure on first master hosts: oo_first_master @@ -19,8 +8,8 @@ # NOTE: We set this even on etcd hosts as they may also later run as masters, # and we don't want to install wrong version of docker and have to downgrade # later. -- name: Set openshift_version for all hosts - hosts: oo_all_hosts:!oo_first_master +- name: Set openshift_version for etcd, node, and master hosts + hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master vars: openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" pre_tasks: diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/openshift-checks/README.md index b26e7d7ed..0b7ea91ff 100644 --- a/playbooks/byo/openshift-checks/README.md +++ b/playbooks/openshift-checks/README.md @@ -47,19 +47,19 @@ against your inventory file. Here is the step-by-step: 3. Run the appropriate playbook: ```console - $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml + $ ansible-playbook -i <inventory file> playbooks/openshift-checks/pre-install.yml ``` or ```console - $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/health.yml + $ ansible-playbook -i <inventory file> playbooks/openshift-checks/health.yml ``` or ```console - $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v + $ ansible-playbook -i <inventory file> playbooks/openshift-checks/certificate_expiry/default.yaml -v ``` ### The adhoc playbook @@ -72,19 +72,19 @@ using the `-e` flag. 
For example, to run the `docker_storage` check: ```console -$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage +$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=docker_storage ``` To run more checks, use a comma-separated list of check names: ```console -$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability +$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability ``` To run an entire class of checks, use the name of a check group tag, prefixed by `@`. This will run all checks tagged `preflight`: ```console -$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight +$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=@preflight ``` It is valid to specify multiple check tags and individual check names together @@ -94,7 +94,7 @@ To list all of the available checks and tags, run the adhoc playbook without setting the `openshift_checks` variable: ```console -$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml +$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml ``` ## Running in a container diff --git a/playbooks/byo/openshift-checks/adhoc.yml b/playbooks/openshift-checks/adhoc.yml index 226bed732..036a63776 100644 --- a/playbooks/byo/openshift-checks/adhoc.yml +++ b/playbooks/openshift-checks/adhoc.yml @@ -1,6 +1,6 @@ --- # NOTE: ideally this would be just part of a single play in -# common/openshift-checks/adhoc.yml that lists the existing checks when +# private/adhoc.yml that lists the existing checks when # openshift_checks is not set or run the requested checks. However, to actually # run the checks we need to have the included dependencies to run first and that # takes time. 
To speed up listing checks, we use this separate play that runs @@ -20,8 +20,6 @@ action: openshift_health_check when: openshift_checks is undefined or not openshift_checks -- include: ../openshift-cluster/initialize_groups.yml +- include: ../init/main.yml -- include: ../../common/openshift-cluster/std_include.yml - -- include: ../../common/openshift-checks/adhoc.yml +- include: private/adhoc.yml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/default.yaml b/playbooks/openshift-checks/certificate_expiry/default.yaml index 630135cae..630135cae 100644 --- a/playbooks/byo/openshift-checks/certificate_expiry/default.yaml +++ b/playbooks/openshift-checks/certificate_expiry/default.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml b/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml index 378d1f154..378d1f154 100644 --- a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml +++ b/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml b/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml index ae41c7c14..ae41c7c14 100644 --- a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml +++ b/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml index d80cb6ff4..d80cb6ff4 100644 --- a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml +++ b/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml index 2189455b7..2189455b7 100644 --- a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml +++ b/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml index 87a0f3be4..87a0f3be4 100644 --- a/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml +++ b/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml b/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml index 960457c4b..960457c4b 100644 --- a/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml +++ b/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml diff --git a/playbooks/common/openshift-checks/roles b/playbooks/openshift-checks/certificate_expiry/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/common/openshift-checks/roles +++ b/playbooks/openshift-checks/certificate_expiry/roles diff --git a/playbooks/openshift-checks/health.yml b/playbooks/openshift-checks/health.yml new file mode 100644 index 000000000..64bfa411d --- /dev/null +++ b/playbooks/openshift-checks/health.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/health.yml diff --git a/playbooks/openshift-checks/pre-install.yml b/playbooks/openshift-checks/pre-install.yml new file mode 100644 index 
000000000..410204d6a --- /dev/null +++ b/playbooks/openshift-checks/pre-install.yml @@ -0,0 +1,4 @@ +--- +- include: ../init/main.yml + +- include: private/pre-install.yml diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/openshift-checks/private/adhoc.yml index dfcef8435..d0deaeb65 100644 --- a/playbooks/common/openshift-checks/adhoc.yml +++ b/playbooks/openshift-checks/private/adhoc.yml @@ -1,12 +1,13 @@ --- -- name: OpenShift health checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: adhoc post_tasks: - - name: Run health checks + - name: Run health checks (adhoc) action: openshift_health_check args: checks: '{{ openshift_checks | default([]) }}' diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/openshift-checks/private/health.yml index 21ea785ef..d0921b9d3 100644 --- a/playbooks/common/openshift-checks/health.yml +++ b/playbooks/openshift-checks/private/health.yml @@ -1,11 +1,13 @@ --- -- name: Run OpenShift health checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: health post_tasks: - - action: openshift_health_check + - name: Run health checks (@health) + action: openshift_health_check args: checks: ['@health'] diff --git a/playbooks/openshift-checks/private/install.yml b/playbooks/openshift-checks/private/install.yml new file mode 100644 index 000000000..93cf6c359 --- /dev/null +++ b/playbooks/openshift-checks/private/install.yml @@ -0,0 +1,51 @@ +--- +- name: Health Check Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Health Check 'In Progress' + run_once: true + set_stats: + data: + installer_phase_health: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: OpenShift Health Checks + hosts: oo_all_hosts + any_errors_fatal: true + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: install + post_tasks: + - name: Run health checks (install) - EL + when: ansible_distribution != "Fedora" + action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - package_availability + - package_version + - docker_image_availability + - docker_storage + + - name: Run health checks (install) - Fedora + when: ansible_distribution == "Fedora" + action: openshift_health_check + args: + checks: + - docker_image_availability + +- name: Health Check Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Health Check 'Complete' + run_once: true + set_stats: + data: + installer_phase_health: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/openshift-checks/private/pre-install.yml index 88e6f9120..32449d4e4 100644 --- a/playbooks/common/openshift-checks/pre-install.yml +++ b/playbooks/openshift-checks/private/pre-install.yml @@ -1,11 +1,13 @@ --- -- name: run OpenShift pre-install checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: pre-install post_tasks: - - action: openshift_health_check + - name: Run health checks (@preflight) + action: openshift_health_check args: checks: ['@preflight'] diff --git a/playbooks/openshift-checks/private/roles b/playbooks/openshift-checks/private/roles new file mode 120000 index 
000000000..20c4c58cf --- /dev/null +++ b/playbooks/openshift-checks/private/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/openshift-checks/roles b/playbooks/openshift-checks/roles new file mode 120000 index 000000000..b741aa3db --- /dev/null +++ b/playbooks/openshift-checks/roles @@ -0,0 +1 @@ +../../roles
\ No newline at end of file diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md new file mode 100644 index 000000000..f3fe13530 --- /dev/null +++ b/playbooks/openstack/README.md @@ -0,0 +1,262 @@ +# OpenStack Provisioning + +This directory contains [Ansible][ansible] playbooks and roles to create +OpenStack resources (servers, networking, volumes, security groups, +etc.). The result is an environment ready for OpenShift installation +via [openshift-ansible]. + +We provide everything necessary to be able to install OpenShift on +OpenStack (including the DNS and load balancer servers when +necessary). In addition, we work on providing integration with the +OpenStack-native services (storage, lbaas, baremetal as a service, +dns, etc.). + + +## OpenStack Requirements + +Before you start the installation, you need to have an OpenStack +environment to connect to. You can use a public cloud or an OpenStack +within your organisation. It is also possible to +use [Devstack][devstack] or [TripleO][tripleo]. In the case of +TripleO, we will be running on top of the **overcloud**. + +The OpenStack release must be Newton (for Red Hat OpenStack this is +version 10) or newer. It must also satisfy these requirements: + +* Heat (Orchestration) must be available +* The deployment image (CentOS 7 or RHEL 7) must be loaded +* The deployment flavor must be available to your user + - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing + - look at + the [Minimum Hardware Requirements page][hardware-requirements] + for production +* The keypair for SSH must be available in OpenStack +* `keystonerc` file that lets you talk to the OpenStack services + * NOTE: only Keystone V2 is currently supported + +Optional: +* External Neutron network with a floating IP address pool + + +## DNS Requirements + +OpenShift requires DNS to operate properly. OpenStack supports DNS-as-a-service +in the form of the Designate project, but the playbooks here don't support it +yet. Until we do (or in case you choose not to run Designate), you will need to +provide a DNS solution yourself. + +If your server supports nsupdate, we will use it to add the necessary records. + +TODO(shadower): describe how to build a sample DNS server and how to configure +our playbooks for nsupdate. + + +## Installation + +There are four main parts to the installation: + +1. [Preparing Ansible and dependencies](#1-preparing-ansible-and-dependencies) +2. [Configuring the desired OpenStack environment and OpenShift cluster](#2-configuring-the-openstack-environment-and-openshift-cluster) +3. [Creating the OpenStack resources (VMs, networking, etc.)](#3-creating-the-openstack-resources-vms-networking-etc) +4. [Installing OpenShift](#4-installing-openshift) + +This guide is going to install [OpenShift Origin][origin] +with [CentOS 7][centos7] images with minimal customisation. + +We will create the VMs for running OpenShift in a new Neutron +network, assign Floating IP addresses and configure DNS. + +The OpenShift cluster will have a single Master node that will run +`etcd`, a single Infra node and two App nodes. + +You can look at +the [Advanced Configuration page][advanced-configuration] for +additional options. + + + +### 1. Preparing Ansible and dependencies + +First, you need to select where to run [Ansible][ansible] from (the +*Ansible host*). This can be the computer you read this guide on or an +OpenStack VM you'll create specifically for this purpose.
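If you go the dedicated-VM route, a minimal sketch of booting such a machine with Ansible's `os_server` module (which relies on the `shade` Python library) might look like the following; the image, flavor, keypair and network names are placeholders to replace with values from your own cloud:

```yaml
# ansible-host.yml -- illustrative only; run from any machine with shade installed
# and your keystonerc sourced.
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Boot a VM to use as the Ansible host
      os_server:
        state: present
        name: ansible-host
        image: centos-7        # placeholder: any image with Python available
        flavor: m1.medium      # placeholder flavor
        key_name: openshift    # placeholder: an existing Nova keypair
        network: private       # placeholder: the network to attach to
        auto_ip: yes           # request a floating IP so you can SSH in
```

Creating the VM from the Horizon dashboard or with `openstack server create` works just as well; the rest of this guide does not care where Ansible runs from.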
+ +We will use +a +[Docker image that has all the dependencies installed][control-host-image] to +make things easier. If you don't want to use Docker, take a look at +the [Ansible host dependencies][ansible-dependencies] and make sure +they're installed. + +Your *Ansible host* needs to have the following: + +1. Docker +2. `keystonerc` file with your OpenStack credentials +3. SSH private key for logging in to your OpenShift nodes + +Assuming your private key is `~/.ssh/id_rsa` and `keystonerc` is in your +current directory: + +```bash +$ sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \ + -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \ + redhatcop/control-host-openstack bash +``` + +This will create the container, add your SSH key and source your +`keystonerc`. It should be set up for the installation. + +You can verify that everything is in order: + + +```bash +$ less .ssh/id_rsa +$ ansible --version +$ openstack image list +``` + + +### 2. Configuring the OpenStack Environment and OpenShift Cluster + +The configuration is all done in an Ansible inventory directory. We +will clone the [openshift-ansible][openshift-ansible] repository and set +things up for a minimal installation. + + +``` +$ git clone https://github.com/openshift/openshift-ansible +$ cp -r openshift-ansible/playbooks/openstack/sample-inventory/ inventory +``` + +If you're testing multiple configurations, you can have multiple +inventories and switch between them. + +#### OpenStack Configuration + +The OpenStack configuration is in `inventory/group_vars/all.yml`. + +Open the file and plug in the image, flavor and network configuration +corresponding to your OpenStack installation. + +```bash +$ vi inventory/group_vars/all.yml +``` + +1. Set the `openshift_openstack_keypair_name` to your OpenStack keypair name. + - See `openstack keypair list` to find the keypairs registered with + OpenStack. + - This must correspond to your private SSH key in `~/.ssh/id_rsa` +2. Set the `openshift_openstack_external_network_name` to the floating IP + network of your OpenStack. + - See `openstack network list` for the list of networks. + - It's often called `public`, `external` or `ext-net`. +3. Set the `openshift_openstack_default_image_name` to the image you want your + OpenShift VMs to run. + - See `openstack image list` for the list of available images. +4. Set the `openshift_openstack_default_flavor` to the flavor you want your + OpenShift VMs to use. + - See `openstack flavor list` for the list of available flavors. +5. Set the `openshift_openstack_dns_nameservers` to the list of the IP addresses + of the DNS servers used for the **private** address resolution. + +**NOTE ON DNS**: at minimum, the OpenShift nodes need to be able to access each +other by their hostname. OpenStack doesn't provide this by default, so you +need to provide a DNS server. Put the address of that DNS server in the +`openshift_openstack_dns_nameservers` variable. + + + + +#### OpenShift configuration + +The OpenShift configuration is in `inventory/group_vars/OSEv3.yml`. + +The default options will mostly work, but unless you used the large +flavors for a production-ready environment, openshift-ansible's +hardware check will fail. + +Let's disable those checks by putting this in +`inventory/group_vars/OSEv3.yml`: + +```yaml +openshift_disable_check: disk_availability,memory_availability +``` + +**NOTE**: The default authentication method will allow **any username +and password** in! If you're running this in a public place, you need +to set up access control.
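If you do need such access control, one option (shown here only as a sketch; it assumes the standard openshift-ansible `openshift_master_identity_providers` and `openshift_master_htpasswd_users` inventory variables and illustrative user data) is to switch the masters to the htpasswd identity provider in the same `inventory/group_vars/OSEv3.yml`:

```yaml
# Only allow logins for users explicitly listed below (illustrative values).
openshift_master_identity_providers:
  - name: htpasswd_auth
    login: true
    challenge: true
    kind: HTPasswdPasswordIdentityProvider
    filename: /etc/origin/master/htpasswd

# Pre-create a user; the value is an htpasswd-style password hash, not plain text.
openshift_master_htpasswd_users:
  alice: "$apr1$REPLACE_WITH_A_REAL_HASH"
```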
+ +Feel free to look at +the [Sample OpenShift Inventory][sample-openshift-inventory] and +the [advanced configuration][advanced-configuration]. + + +### 3. Creating the OpenStack resources (VMs, networking, etc.) + +We provide an `ansible.cfg` file which has some useful defaults -- you should +copy it to the directory you're going to run `ansible-playbook` from. + +```bash +$ cp openshift-ansible/ansible.cfg ansible.cfg +``` + +Then run the provisioning playbook -- this will create the OpenStack +resources: + +```bash +$ ansible-playbook --user openshift -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision.yaml +``` + +If you're using multiple inventories, make sure you pass the path to +the right one to `-i`. + +If your SSH private key is not in `~/.ssh/id_rsa` use the `--private-key` +option to specify the correct path. + + +### 4. Installing OpenShift + +Run the `byo/config.yml` playbook on top of the OpenStack nodes we have +prepared. + +```bash +$ ansible-playbook -i inventory openshift-ansible/playbooks/byo/config.yml +``` + + +### Next Steps + +And that's it! You should have a small but functional OpenShift +cluster now. + +Take a look at [how to access the cluster][accessing-openshift] +and [how to remove it][uninstall-openshift] as well as the more +advanced configuration: + +* [Accessing the OpenShift cluster][accessing-openshift] +* [Removing the OpenShift cluster][uninstall-openshift] +* Set Up Authentication (TODO) +* [Multiple Masters with a load balancer][loadbalancer] +* [External Dns][external-dns] +* Multiple Clusters (TODO) +* [Cinder Registry][cinder-registry] +* [Bastion Node][bastion] + + +[ansible]: https://www.ansible.com/ +[openshift-ansible]: https://github.com/openshift/openshift-ansible +[devstack]: https://docs.openstack.org/devstack/ +[tripleo]: http://tripleo.org/ +[ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node +[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/ +[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware +[origin]: https://www.openshift.org/ +[centos7]: https://www.centos.org/ +[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example +[advanced-configuration]: ./advanced-configuration.md +[accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster +[uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster +[loadbalancer]: ./advanced-configuration.md#multi-master-configuration +[external-dns]: ./advanced-configuration.md#dns-configuration-variables +[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry +[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md new file mode 100644 index 000000000..90cc20b98 --- /dev/null +++ b/playbooks/openstack/advanced-configuration.md @@ -0,0 +1,772 @@ +## Dependencies for localhost (ansible control/admin node) + +* [Ansible 2.3](https://pypi.python.org/pypi/ansible) +* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps) +* [jinja2](http://jinja.pocoo.org/docs/2.9/) +* [shade](https://pypi.python.org/pypi/shade) +* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath) +* python-dns / 
[dnspython](https://pypi.python.org/pypi/dnspython) +* Become (sudo) is not required. + +**NOTE**: You can use a Docker image with all dependencies set up. +Find more in the [Deployment section](#deployment). + +### Optional Dependencies for localhost +**Note**: When using RHEL images, the `rhel-7-server-openstack-10-rpms` repository is required in order to install these packages. + +* `python-openstackclient` +* `python-heatclient` + +## Dependencies for OpenStack hosted cluster nodes (servers) + +There are no additional dependencies for the cluster nodes. Required +configuration steps are done by Heat given a specific user data config +that normally should not be changed. + +## Required galaxy modules + +In order to pull in external dependencies for DNS configuration steps, +the following commands need to be executed: + + ansible-galaxy install \ + -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \ + -p openshift-ansible-contrib/roles + +Alternatively you can install directly from GitHub: + + ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \ + -p openshift-ansible-contrib/roles + +Notes: +* This assumes we're in the directory that contains the cloned +openshift-ansible-contrib repo in its root path. +* When trying to install a different version, the previous one must be removed first +(the `infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)). +Otherwise, even if there are differences between the two versions, installation of the newer version is skipped. + + +## Accessing the OpenShift Cluster + +### Use the Cluster DNS + +In addition to the OpenShift nodes, we created a DNS server with all +the necessary entries. We will configure your *Ansible host* to use +this new DNS and talk to the deployed OpenShift. + +First, get the DNS IP address: + +```bash +$ openstack server show dns-0.openshift.example.com --format value --column addresses +openshift-ansible-openshift.example.com-net=192.168.99.11, 10.40.128.129 +``` + +Note the floating IP address (it's `10.40.128.129` in this case) -- if +you're not sure, try pinging them both -- it's the one that responds +to pings. + +Next, edit your `/etc/resolv.conf` as root and put `nameserver DNS_IP` as your +**first entry**. + +If your `/etc/resolv.conf` currently looks like this: + +``` +; generated by /usr/sbin/dhclient-script +search openstacklocal +nameserver 192.168.0.3 +nameserver 192.168.0.2 +``` + +Change it to this: + +``` +; generated by /usr/sbin/dhclient-script +search openstacklocal +nameserver 10.40.128.129 +nameserver 192.168.0.3 +nameserver 192.168.0.2 +``` + +### Get the `oc` Client + +**NOTE**: You can skip this section if you're using the Docker image +-- it already has the `oc` binary. + +You need to download the OpenShift command line client (called `oc`). +You can download and extract `openshift-origin-client-tools` from the +OpenShift release page: + +https://github.com/openshift/origin/releases/latest/ + +Or you can now copy it from the master node: + + $ ansible -i inventory masters[0] -m fetch -a "src=/bin/oc dest=oc" + +Either way, find the `oc` binary and put it in your `PATH`.
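If you would rather do that copy with a small playbook than with the ad-hoc command, a sketch (assuming the inventory from this guide and a writable `/usr/local/bin` on your Ansible host) could be:

```yaml
# fetch-oc.yml -- illustrative only; run with: ansible-playbook -i inventory fetch-oc.yml
- hosts: masters[0]
  gather_facts: no
  tasks:
    - name: Copy the oc binary from the master to the Ansible host
      fetch:
        src: /bin/oc
        dest: /usr/local/bin/oc
        flat: yes

    - name: Make the local copy executable
      file:
        path: /usr/local/bin/oc
        mode: '0755'
      delegate_to: localhost
```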
+ + +### Logging in Using the Command Line + + +``` +oc login --insecure-skip-tls-verify=true https://master-0.openshift.example.com:8443 -u user -p password +oc new-project test +oc new-app --template=cakephp-mysql-example +oc status -v +curl http://cakephp-mysql-example-test.apps.openshift.example.com +``` + +This will trigger an image build. You can run `oc logs -f +bc/cakephp-mysql-example` to follow its progress. + +Wait until the build has finished and both pods are deployed and running: + +``` +$ oc status -v +In project test on server https://master-0.openshift.example.com:8443 + +http://cakephp-mysql-example-test.apps.openshift.example.com (svc/cakephp-mysql-example) + dc/cakephp-mysql-example deploys istag/cakephp-mysql-example:latest <- + bc/cakephp-mysql-example source builds https://github.com/openshift/cakephp-ex.git on openshift/php:7.0 + deployment #1 deployed about a minute ago - 1 pod + +svc/mysql - 172.30.144.36:3306 + dc/mysql deploys openshift/mysql:5.7 + deployment #1 deployed 3 minutes ago - 1 pod + +Info: + * pod/cakephp-mysql-example-1-build has no liveness probe to verify pods are still running. + try: oc set probe pod/cakephp-mysql-example-1-build --liveness ... +View details with 'oc describe <resource>/<name>' or list everything with 'oc get all'. + +``` + +You can now look at the deployed app using its route: + +``` +$ curl http://cakephp-mysql-example-test.apps.openshift.example.com +``` + +Its `title` should say: "Welcome to OpenShift". + + +### Accessing the UI + +You can also access the OpenShift cluster with a web browser by going to: + +https://master-0.openshift.example.com:8443 + +Note that for this to work, the OpenShift nodes must be accessible +from your computer and its DNS configuration must use the cluster's +DNS. + + +## Removing the OpenShift Cluster + +Everything in the cluster is contained within a Heat stack. To +completely remove the cluster and all the related OpenStack resources, +run this command: + +```bash +openstack stack delete --wait --yes openshift.example.com +``` + + +## DNS configuration variables + +Pay special attention to the values in the first paragraph -- these +will depend on your OpenStack environment. + +Note that the provisioning playbooks update the original Neutron subnet +created with the Heat stack to point to the configured DNS servers. +So the provisioned cluster nodes will start using those natively as +default nameservers. Technically, this allows deploying OpenShift clusters +without dnsmasq proxies. + +The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain` will form the cluster's DNS domain that all +your servers will be under. With the default values, this will be +`openshift.example.com`. For workloads, the default subdomain is 'apps'. +That subdomain can also be set with the `openshift_openstack_app_subdomain` variable in +the inventory. + +The `openstack_<role name>_hostname` is a set of variables used for customising +hostnames of servers with a given role. When such a variable stays commented, +the default hostname (usually the role name) is used. + +The `openshift_openstack_dns_nameservers` is a list of DNS servers accessible from all +the created Nova servers. These will provide the internal name resolution for +your OpenShift nodes (as well as upstream name resolution for installing +packages, etc.). + +The `openshift_use_dnsmasq` controls whether dnsmasq is deployed.
+
+`openshift_openstack_external_nsupdate_keys` describes the external authoritative DNS server(s)
+that process dynamic record updates for the public and private cluster views:
+
+    openshift_openstack_external_nsupdate_keys:
+      public:
+        key_secret: <some nsupdate key>
+        key_algorithm: 'hmac-md5'
+        key_name: 'update-key'
+        server: <public DNS server IP>
+      private:
+        key_secret: <some nsupdate key 2>
+        key_algorithm: 'hmac-sha256'
+        server: <public or private DNS server IP>
+
+Here, for the public view section, we specified a different key algorithm and
+the optional `key_name`, which normally defaults to the cluster's DNS domain.
+This illustrates a compatibility mode with a DNS service deployed
+by OpenShift on the OSP10 reference architecture, used in a mixed mode with
+another external DNS server.
+
+Another example defines an external DNS server for the public view,
+in addition to the in-stack DNS server used for the private view only:
+
+    openshift_openstack_external_nsupdate_keys:
+      public:
+        key_secret: <some nsupdate key>
+        key_algorithm: 'hmac-sha256'
+        server: <public DNS server IP>
+
+Here, updates matching the public view will be sent to the given public
+server IP, while updates matching the private view will be sent to the
+automatically evaluated in-stack DNS server's **public** IP.
+
+Note that for the in-stack DNS server, private view updates may be sent only
+via the public IP of the server. You cannot send updates via the private
+IP yet. This forces the in-stack private server to have a floating IP.
+See also the [security notes](#security-notes).
+
+## Flannel networking
+
+In order to configure the
+[flannel networking](https://docs.openshift.com/container-platform/3.6/install_config/configuring_sdn.html#using-flannel),
+uncomment and adjust the appropriate `inventory/group_vars/OSEv3.yml` group vars.
+Note that `osm_cluster_network_cidr` must not overlap with the default
+Docker bridge subnet of 172.17.0.0/16; otherwise, change the default docker0
+CIDR range, for example by adding `--bip=192.168.2.1/24` to
+`DOCKER_NETWORK_OPTIONS` located in `/etc/sysconfig/docker-network`.
+
+Also note that the flannel network will be provisioned on a separate isolated Neutron
+subnet defined by `osm_cluster_network_cidr` and with port security disabled.
+Use the `openstack_private_data_network_name` variable to define the network
+name for the Heat stack resource.
+
+After the cluster deployment is done, you should run an additional post-installation
+step to configure flannel and the Docker iptables rules:
+
+    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-install.yml
+
+## Other configuration variables
+
+`openshift_openstack_keypair_name` is a Nova keypair - you can see your
+keypairs with `openstack keypair list`. It must correspond to the
+private SSH key Ansible will use to log into the created VMs. This is
+`~/.ssh/id_rsa` by default, but you can use a different key by passing
+`--private-key` to `ansible-playbook`.
+
+`openshift_openstack_default_image_name` is the default name of the Glance image the
+servers will use. You can see your images with `openstack image list`.
+In order to set a different image for a role, uncomment the line with the
+corresponding variable (e.g. `openshift_openstack_lb_image_name` for the load balancer) and
+set its value to another available image name. `openshift_openstack_default_image_name`
+must stay defined as it is used as a default value for the rest of the roles.
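+
+If your cloud does not provide a suitable image yet, you can upload one
+yourself. This is only a sketch -- the download URL and the image name
+`centos7` are examples; use whatever matches your environment:
+
+```bash
+curl -LO https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+openstack image create centos7 \
+  --disk-format qcow2 --container-format bare \
+  --file CentOS-7-x86_64-GenericCloud.qcow2
+openstack image list
+```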
+
+`openshift_openstack_default_flavor` is the default Nova flavor the servers will use.
+You can see your flavors with `openstack flavor list`.
+In order to set a different flavor for a role, uncomment the line with the
+corresponding variable (e.g. `openshift_openstack_lb_flavor` for the load balancer) and
+set its value to another available flavor. `openshift_openstack_default_flavor` must
+stay defined as it is used as a default value for the rest of the roles.
+
+`openshift_openstack_external_network_name` is the name of the Neutron network
+providing external connectivity. It is often called `public`,
+`external` or `ext-net`. You can see your networks with `openstack
+network list`.
+
+`openshift_openstack_private_network_name` is the name of the private Neutron network
+providing admin/control access for ansible. It can be merged with other
+cluster networks; there are no special networking requirements.
+
+The `openshift_openstack_num_masters`, `openshift_openstack_num_infra` and
+`openshift_openstack_num_nodes` values specify the number of Master, Infra and
+App nodes to create.
+
+The `openshift_openstack_cluster_node_labels` variable defines custom labels for your OpenShift
+cluster node groups. It currently supports the app and infra node groups.
+The default value of this variable sets `region: primary` for app nodes and
+`region: infra` for infra nodes.
+An example of setting a customised label:
+```
+openshift_openstack_cluster_node_labels:
+  app:
+    mylabel: myvalue
+```
+
+The `openshift_openstack_nodes_to_remove` variable allows you to specify the numerical indexes
+of App nodes that should be removed; for example, `['0', '2']`.
+
+The `docker_volume_size` is the default Docker volume size the servers will use.
+In order to set a different volume size for a role,
+uncomment the line with the corresponding variable (e.g. `docker_master_volume_size`
+for master) and change its value. `docker_volume_size` must stay defined as it is
+used as a default value for some of the servers (master, infra, app node).
+The rest of the roles (etcd, load balancer, dns) have their defaults hard-coded.
+
+**Note**: If `openshift_openstack_ephemeral_volumes` is set to `true`, the `*_volume_size` variables
+will be ignored and the deployment will not create any cinder volumes.
+
+The `openshift_openstack_flat_secgrp` variable controls Neutron security group creation for Heat
+stacks. Set it to true if you experience issues with security group rule
+quotas. It trades security for the number of rules, by sharing the same set
+of firewall rules for master, node, etcd and infra nodes.
+
+The `openshift_openstack_required_packages` variable also provides a list of the additional
+prerequisite packages to be installed before deploying an OpenShift cluster.
+They are ignored, though, if `manage_packages: False` is set.
+
+The `openstack_inventory` variable controls whether a static inventory will be created after the
+cluster nodes are provisioned on the OpenStack cloud. Note that the fully dynamic inventory
+is not supported yet, so the static inventory will be created anyway.
+
+The `openstack_inventory_path` points to the directory that hosts the generated static inventory.
+It should point to the copied example inventory directory; otherwise it creates
+a new one for you.
+
+## Multi-master configuration
+
+Please refer to the official documentation for the
+[multi-master setup](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#multiple-masters)
+and define the corresponding [inventory
+variables](https://docs.openshift.com/container-platform/3.6/install_config/install/advanced_install.html#configuring-cluster-variables)
+in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node
+under the ansible group named `ext_lb`:
+
+    openshift_master_cluster_method: native
+    openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
+    openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"
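+
+For illustration, the `ext_lb` group in such a static inventory might look like
+this (the hostname below is only an example):
+
+    [ext_lb]
+    lb-0.openshift.example.com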
+
+## Provider Network
+
+Normally, the playbooks create a new Neutron network and subnet and attach
+floating IP addresses to each node. If you have a provider network set up, this
+is all unnecessary as you can just access servers that are placed in the
+provider network directly.
+
+To use a provider network, set its name in `openshift_openstack_provider_network_name` in
+`inventory/group_vars/all.yml`.
+
+If you set the provider network name, the `openshift_openstack_external_network_name` and
+`openshift_openstack_private_network_name` fields will be ignored.
+
+**NOTE**: this will not update the nodes' DNS, so running openshift-ansible
+right after provisioning will fail (unless you're using an external DNS server
+your provider network knows about). You must make sure your nodes are able to
+resolve each other by name.
+
+## Security notes
+
+Configure the required `*_ingress_cidr` variables to restrict public access
+to provisioned servers from your laptop (a /32 notation should be used)
+or your trusted network. The most important is the `openshift_openstack_node_ingress_cidr`
+that restricts public access to the deployed DNS server and the cluster
+nodes' ephemeral ports range.
+
+Note, the command ``curl https://api.ipify.org`` helps finding an external
+IP address of your box (the ansible admin node).
+
+There is also the `manage_packages` variable (defaults to True) you
+may want to turn off in order to speed up the provisioning tasks. This may
+be the case for development environments. When turned off, the servers will
+be provisioned omitting the ``yum update`` command. This brings security
+implications though, and is not recommended for production deployments.
+
+### DNS servers security options
+
+Aside from `openshift_openstack_node_ingress_cidr` restricting public access to in-stack DNS
+servers, the following (bind/named specific) DNS security
+options are available:
+
+    named_public_recursion: 'no'
+    named_private_recursion: 'yes'
+
+External DNS servers, which are not included in the 'dns' hosts group,
+are not managed. It is up to you to configure them.
+
+## Configure the OpenShift parameters
+
+Finally, you need to update the DNS entry in
+`inventory/group_vars/OSEv3.yml` (look at
+`openshift_master_default_subdomain`).
+
+In addition, this is the place where you can customise your OpenShift
+installation, for example by specifying the authentication method.
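+
+For instance, to use HTPasswd authentication you could add something along
+these lines to `inventory/group_vars/OSEv3.yml`. This is only a sketch; check
+the OpenShift documentation for the identity provider options that match your
+release:
+
+    openshift_master_identity_providers:
+      - name: htpasswd_auth
+        kind: HTPasswdPasswordIdentityProvider
+        login: true
+        challenge: true
+        filename: /etc/origin/master/htpasswd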
+
+The full list of options is available in this sample inventory:
+
+https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example
+
+Note that in order to deploy OpenShift Origin, you should update the following
+variables in `inventory/group_vars/OSEv3.yml` and `all.yml`:
+
+    deployment_type: origin
+    openshift_deployment_type: "{{ deployment_type }}"
+
+
+## Setting a custom entrypoint
+
+In order to set a custom entrypoint, update `openshift_master_cluster_public_hostname`:
+
+    openshift_master_cluster_public_hostname: api.openshift.example.com
+
+Note that an empty hostname does not work, so if your domain is `openshift.example.com`,
+you cannot set this value to simply `openshift.example.com`.
+
+## Creating and using a Cinder volume for the OpenShift registry
+
+You can optionally have the playbooks create a Cinder volume and set
+it up as the OpenShift hosted registry.
+
+To do that, you need to specify the desired Cinder volume name and size in
+Gigabytes in `inventory/group_vars/all.yml`:
+
+    openshift_openstack_cinder_hosted_registry_name: cinder-registry
+    openshift_openstack_cinder_hosted_registry_size_gb: 10
+
+With this, the playbooks will create the volume and set up its
+filesystem. If there is an existing volume of the same name, we will
+use it but keep the existing data on it.
+
+To use the volume for the registry, you must first configure it with
+the OpenStack credentials by putting the following to `OSEv3.yml`:
+
+    openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}"
+    openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
+    openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
+    openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}"
+
+This will use the credentials from your shell environment. If you want
+to enter them explicitly, you can. You can also use credentials
+different from the provisioning ones (say for quota or access control
+reasons).
+
+**NOTE**: If you're testing this on [DevStack][devstack], you must
+explicitly set your Keystone API version to v2 (e.g.
+`OS_AUTH_URL=http://10.34.37.47/identity/v2.0`) instead of the default
+value provided by `openrc`. You may also encounter the following issue
+with Cinder:
+
+https://github.com/kubernetes/kubernetes/issues/50461
+
+You can read the [OpenShift documentation on configuring
+OpenStack][openstack] for more information.
+
+[devstack]: https://docs.openstack.org/devstack/latest/
+[openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html
+
+
+Next, we need to instruct OpenShift to use the Cinder volume for its
+registry. Again in `OSEv3.yml`:
+
+    #openshift_hosted_registry_storage_kind: openstack
+    #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
+    #openshift_hosted_registry_storage_openstack_filesystem: xfs
+
+The filesystem value here will be used in the initial formatting of
+the volume.
+
+If you're using the dynamic inventory, you must uncomment these two values as
+well:
+
+    #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', openshift_openstack_cinder_hosted_registry_name).id }}"
+    #openshift_hosted_registry_storage_volume_size: "{{ openshift_openstack_cinder_hosted_registry_size_gb }}Gi"
+
+But note that they use the `os_cinder` lookup plugin we provide, so you must
+tell Ansible where to find it either in `ansible.cfg` (the one we provide is
+configured properly) or by exporting the
+`ANSIBLE_LOOKUP_PLUGINS=openshift-ansible-contrib/lookup_plugins` environment
+variable.
+
+
+
+## Use an existing Cinder volume for the OpenShift registry
+
+You can also use a pre-existing Cinder volume for the storage of your
+OpenShift registry.
+
+To do that, you need to have a Cinder volume. You can create one by
+running:
+
+    openstack volume create --size <volume size in gb> <volume name>
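+
+For example, to create a 10 GB volume named `registry` and look up the UUID you
+will need for `openshift_hosted_registry_storage_openstack_volumeID` below
+(the name and size are illustrative):
+
+    openstack volume create --size 10 registry
+    openstack volume show registry --format value --column id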
+
+The volume needs to have a file system created before you put it to
+use.
+
+As with the automatically-created volume, you have to set up the
+OpenStack credentials in `inventory/group_vars/OSEv3.yml` as well as the
+registry values:
+
+    #openshift_hosted_registry_storage_kind: openstack
+    #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
+    #openshift_hosted_registry_storage_openstack_filesystem: xfs
+    #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
+    #openshift_hosted_registry_storage_volume_size: 10Gi
+
+Note the `openshift_hosted_registry_storage_openstack_volumeID` and
+`openshift_hosted_registry_storage_volume_size` values: these need to
+be added in addition to the previous variables.
+
+The **Cinder volume ID**, **filesystem** and **volume size** variables
+must correspond to the values in your volume. The volume ID must be
+the **UUID** of the Cinder volume, *not its name*.
+
+We can format the volume for you if you ask for it in
+`inventory/group_vars/all.yml`:
+
+    openshift_openstack_prepare_and_format_registry_volume: true
+
+**NOTE:** doing so **will destroy any data that's currently on the volume**!
+
+You can also run the registry setup playbook directly:
+
+    ansible-playbook -i inventory playbooks/provisioning/openstack/prepare-and-format-cinder-volume.yaml
+
+(the provisioning phase must be completed first)
+
+
+
+## Configure static inventory and access via a bastion node
+
+Example inventory variables:
+
+    openshift_openstack_use_bastion: true
+    openshift_openstack_bastion_ingress_cidr: "{{openshift_openstack_subnet_prefix}}.0/24"
+    openstack_private_ssh_key: ~/.ssh/id_rsa
+    openstack_inventory: static
+    openstack_inventory_path: ../../../../inventory
+    openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com
+
+The `openshift_openstack_subnet_prefix` is the OpenStack private network prefix for your cluster.
+The `openshift_openstack_bastion_ingress_cidr` defines the accepted range for SSH connections to nodes,
+in addition to the `openshift_openstack_ssh_ingress_cidr` (see the security notes above).
+
+The SSH config will be stored on the ansible control node at the
+given path. Ansible uses it automatically. To access the cluster nodes with
+that ssh config, use the `-F` option, e.g.:
+
+    ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK
+
+Note that relative paths will not work for the `openstack_ssh_config_path`, but they
+work for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this
+guide, the latter points to the current directory, where you run ansible commands
+from.
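+
+If you ever need to write such an SSH config by hand, a bastion-based setup
+typically boils down to something like the following (the host patterns, user
+and bastion address are placeholders):
+
+    Host bastion
+        HostName <bastion floating IP>
+        User openshift
+        IdentityFile ~/.ssh/id_rsa
+
+    Host *.openshift.example.com
+        User openshift
+        IdentityFile ~/.ssh/id_rsa
+        ProxyCommand ssh -W %h:%p bastion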
+
+To verify nodes connectivity, use the command:
+
+    ansible -v -i inventory/hosts -m ping all
+
+If something is broken, double-check the inventory variables, paths and the
+generated `<openstack_inventory_path>/hosts` and `openstack_ssh_config_path` files.
+
+The `inventory: dynamic` setting can be used instead to access cluster nodes directly via
+floating IPs. In this mode you cannot use a bastion node and should specify
+the dynamic inventory file in your ansible commands, like `-i openstack.py`.
+
+## Using Docker on the Ansible host
+
+If you don't want to worry about the dependencies, you can use the
+[OpenStack Control Host image][control-host-image].
+
+[control-host-image]: https://hub.docker.com/r/redhatcop/control-host-openstack/
+
+It has all the dependencies installed, but you'll need to map your
+code and credentials to it. Assuming your SSH keys live in `~/.ssh`
+and everything else is in your current directory (i.e. `ansible.cfg`,
+`keystonerc`, `inventory`, `openshift-ansible`,
+`openshift-ansible-contrib`), this is how you run the deployment:
+
+    sudo docker run -it -v ~/.ssh:/mnt/.ssh:Z \
+        -v $PWD:/root/openshift:Z \
+        -v $PWD/keystonerc:/root/.config/openstack/keystonerc.sh:Z \
+        redhatcop/control-host-openstack bash
+
+(feel free to replace `$PWD` with an actual path to your inventory and
+checkouts, but note that relative paths don't work)
+
+The first run may take a few minutes while the image is being
+downloaded. After that, you'll be inside the container and you can run
+the playbooks:
+
+    cd openshift
+    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
+
+
+### Run the playbook
+
+Assuming your OpenStack (Keystone) credentials are in the `keystonerc` file,
+this is how you start the provisioning process from your ansible control node:
+
+    . keystonerc
+    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml
+
+Note that you start with an empty inventory here. The static inventory will be populated
+with data so you can omit providing additional arguments for future ansible commands.
+
+If the bastion is enabled, the generated SSH config must be applied for ansible.
+Otherwise, it is automatically included by the previous step. In order to execute it
+as a separate playbook, use the following command:
+
+    ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml
+
+The first infra node then becomes a bastion node as well and proxies access
+for future ansible commands. The post-provision step also configures Satellite,
+if requested, and the DNS server, and ensures other OpenShift requirements are met.
+
+
+## Running Custom Post-Provision Actions
+
+A custom playbook can be run like this:
+
+```
+ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml
+```
+
+If you'd like to limit the run to one particular host, you can do so as follows:
+
+```
+ansible-playbook --private-key ~/.ssh/openshift -i inventory/ openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/custom-playbook.yml -l app-node-0.openshift.example.com
+```
+
+You can also create your own custom playbook. Here are a few examples:
+
+### Adding additional YUM repositories
+
+```
+---
+- hosts: app
+  tasks:
+
+  # enable EPEL
+  - name: Add repository
+    yum_repository:
+      name: epel
+      description: EPEL YUM repo
+      baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+```
+
+This example runs against app nodes. The list of host group options includes:
+
+  - cluster_hosts (all hosts: app, infra, masters, dns, lb)
+  - OSEv3 (app, infra, masters)
+  - app
+  - dns
+  - masters
+  - infra_hosts
+
+### Attaching additional RHN pools
+
+```
+---
+- hosts: cluster_hosts
+  tasks:
+  - name: Attach additional RHN pool
+    become: true
+    command: "/usr/bin/subscription-manager attach --pool=<pool ID>"
+    register: attach_rhn_pool_result
+    until: attach_rhn_pool_result.rc == 0
+    retries: 10
+    delay: 1
+```
+
+This playbook runs against all cluster nodes. In order to help prevent slow connectivity
+problems, the task is retried 10 times in case of initial failure.
+Note that in order for this example to work in your deployment, your servers must use the RHEL image.
+
+### Adding extra Docker registry URLs
+
+This playbook is located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/tree/master/playbooks/provisioning/openstack/custom-actions) directory.
+
+It adds URLs passed as arguments to the docker configuration file.
+Going into more detail, the configuration file (which is in the YAML format) is loaded into an ansible variable
+([lines 27-30](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L27-L30))
+and in its structure, the `registries` and `insecure_registries` sections are expanded with the newly added items
+([lines 56-76](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L56-L76)).
+The new content is then saved into the original file
+([lines 78-82](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml#L78-L82))
+and docker is restarted.
+
+Example usage:
+```
+ansible-playbook -i <inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml --extra-vars '{"registries": "reg1", "insecure_registries": ["ins_reg1","ins_reg2"]}'
+```
+
+### Adding extra CAs to the trust chain
+
+This playbook is also located in the [custom-actions](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions) directory.
+It copies the passed CAs to the trust chain location and updates the trust chain on each selected host.
+
+Example usage:
+```
+ansible-playbook -i <inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions/add-cas.yml --extra-vars '{"ca_files": [<absolute path to ca1 file>, <absolute path to ca2 file>]}'
+```
+
+Please consider contributing your custom playbook back to openshift-ansible-contrib!
+
+A library of custom post-provision actions exists in `openshift-ansible-contrib/playbooks/provisioning/openstack/custom-actions`. Playbooks include:
+
+* [add-yum-repos.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml): adds a list of custom yum repositories to every node in the cluster
+* [add-rhn-pools.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml): attaches a list of additional RHN pools to every node in the cluster
+* [add-docker-registry.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-docker-registry.yml): adds a list of docker registries to the docker configuration on every node in the cluster
+* [add-cas.yml](https://github.com/openshift/openshift-ansible-contrib/blob/master/playbooks/provisioning/openstack/custom-actions/add-cas.yml): adds a list of CAs to the trust chain on every node in the cluster
+
+
+## Install OpenShift
+
+Once it succeeds, you can install OpenShift by running:
+
+    ansible-playbook openshift-ansible/playbooks/byo/config.yml
+
+## Access UI
+
+The OpenShift UI may be accessed via the first master node's FQDN on port 8443.
+
+When using a bastion, you may want to create an SSH tunnel from your control node
+so that you can access the UI at `https://localhost:8443`, with this inventory variable:
+
+    openshift_openstack_ui_ssh_tunnel: True
+
+Note that this requires sudo rights on the ansible control node and an absolute path
+for the `openstack_private_ssh_key`. You should also update the control node's
+`/etc/hosts`:
+
+    127.0.0.1 master-0.openshift.example.com
+
+In order to access the UI, the ssh-tunnel service will be created and started on the
+control node. Make sure to remove these changes and the service manually when they are
+no longer needed.
+
+## Scale Deployment up/down
+
+### Scaling up
+
+One can scale up the number of application nodes by executing the ansible playbook
+`openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml`.
+This process can be done even if there is currently no deployment available.
+The `increment_by` variable is used to specify by how much the deployment should
+be scaled up (if none exists, it serves as a target number of application nodes).
+The path to the `openshift-ansible` directory can be customised by the `openshift_ansible_dir`
+variable. Its value must be an absolute path to `openshift-ansible` and it must not
+end with a '/' symbol.
+
+Usage:
+
+```
+ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>]
+```
+
+Note: This playbook works only without a bastion node (`openshift_openstack_use_bastion: False`).
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
new file mode 100644
index 000000000..1c4f609e3
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -0,0 +1,15 @@
+---
+# NOTE(shadower): the AWS playbook builds an in-memory inventory of
+# all the EC2 instances here. We don't need to as that's done by the
+# dynamic inventory.
+
+# TODO(shadower): the AWS playbook sets the
+# `openshift_master_cluster_hostname` and `osm_custom_cors_origins`
+# values here. We do it in the OSEv3 group vars. Do we need to add
+# some logic here?
+ +- name: run the initialization + include: ../../init/main.yml + +- name: run the config + include: ../../common/openshift-cluster/config.yml diff --git a/playbooks/openstack/openshift-cluster/prerequisites.yml b/playbooks/openstack/openshift-cluster/prerequisites.yml new file mode 100644 index 000000000..0356b37dd --- /dev/null +++ b/playbooks/openstack/openshift-cluster/prerequisites.yml @@ -0,0 +1,12 @@ +--- +- hosts: localhost + tasks: + - name: Check dependencies and OpenStack prerequisites + include_role: + name: openshift_openstack + tasks_from: check-prerequisites.yml + + - name: Check network configuration + include_role: + name: openshift_openstack + tasks_from: net_vars_check.yaml diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml new file mode 100644 index 000000000..36d8c8215 --- /dev/null +++ b/playbooks/openstack/openshift-cluster/provision.yml @@ -0,0 +1,59 @@ +--- +- name: Create the OpenStack resources for cluster installation + hosts: localhost + tasks: + - name: provision cluster + include_role: + name: openshift_openstack + tasks_from: provision.yml + + +# NOTE(shadower): Bring in the host groups: +- name: evaluate groups + include: ../../init/evaluate_groups.yml + + +- name: Wait for the nodes and gather their facts + hosts: oo_all_hosts + become: yes + # NOTE: The nodes may not be up yet, don't gather facts here. + # They'll be collected after `wait_for_connection`. + gather_facts: no + tasks: + - name: Wait for the the nodes to come up + wait_for_connection: + + - name: Gather facts for the new nodes + setup: + + +# NOTE(shadower): the (internal) DNS must be functional at this point!! +# That will have happened in provision.yml if nsupdate was configured. + +# TODO(shadower): consider splitting this up so people can stop here +# and configure their DNS if they have to. +- name: Populate the DNS entries + hosts: localhost + tasks: + - name: Populate DNS entries + include_role: + name: openshift_openstack + tasks_from: populate-dns.yml + when: + - openshift_openstack_external_nsupdate_keys is defined + - openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined + +- name: Prepare the Nodes in the cluster for installation + hosts: oo_all_hosts + become: yes + gather_facts: yes + tasks: + - name: Install dependencies + include_role: + name: openshift_openstack + tasks_from: node-packages.yml + + - name: Configure Node + include_role: + name: openshift_openstack + tasks_from: node-configuration.yml diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml new file mode 100644 index 000000000..5d88c105f --- /dev/null +++ b/playbooks/openstack/openshift-cluster/provision_install.yml @@ -0,0 +1,9 @@ +--- +- name: Check the prerequisites for cluster provisioning in OpenStack + include: prerequisites.yml + +- name: Include the provision.yml playbook to create cluster + include: provision.yml + +- name: Include the install.yml playbook to install cluster + include: install.yml diff --git a/playbooks/openstack/openshift-cluster/roles b/playbooks/openstack/openshift-cluster/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/openstack/openshift-cluster/roles @@ -0,0 +1 @@ +../../../roles/
\ No newline at end of file diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml new file mode 100644 index 000000000..1e55adb9e --- /dev/null +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -0,0 +1,59 @@ +--- +openshift_deployment_type: origin +#openshift_deployment_type: openshift-enterprise +#openshift_release: v3.5 +openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" + +openshift_master_cluster_method: native +openshift_master_cluster_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" +openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_hostname }}" + +osm_default_node_selector: 'region=primary' + +openshift_hosted_router_wait: True +openshift_hosted_registry_wait: True + +## Openstack credentials +#openshift_cloudprovider_kind=openstack +#openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +#openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}" +#openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" +#openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" +#openshift_cloudprovider_openstack_region="{{ lookup('env', 'OS_REGION_NAME') }}" + + +## Use Cinder volume for Openshift registry: +#openshift_hosted_registry_storage_kind: openstack +#openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce'] +#openshift_hosted_registry_storage_openstack_filesystem: xfs + +## NOTE(shadower): This won't work until the openshift-ansible issue #5657 is fixed: +## https://github.com/openshift/openshift-ansible/issues/5657 +## If you're using the `openshift_openstack_cinder_hosted_registry_name` option from +## `all.yml`, uncomment these lines: +#openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', openshift_openstack_cinder_hosted_registry_name).id }}" +#openshift_hosted_registry_storage_volume_size: "{{ openshift_openstack_cinder_hosted_registry_size_gb }}Gi" + +## If you're using a Cinder volume you've set up yourself, uncomment these lines: +#openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05 +#openshift_hosted_registry_storage_volume_size: 10Gi + + +# NOTE(shadower): the hostname check seems to always fail because the +# host's floating IP address doesn't match the address received from +# inside the host. +openshift_override_hostname_check: true + +# For POCs or demo environments that are using smaller instances than +# the official recommended values for RAM and DISK, uncomment the line below. +#openshift_disable_check: disk_availability,memory_availability + +# NOTE(shadower): Always switch to root on the OSEv3 nodes. +# openshift-ansible requires an explicit `become`. 
+ansible_become: true + +# # Flannel networking +#osm_cluster_network_cidr: 10.128.0.0/14 +#openshift_use_openshift_sdn: false +#openshift_use_flannel: true +#flannel_interface: eth1 diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml new file mode 100644 index 000000000..921edb867 --- /dev/null +++ b/playbooks/openstack/sample-inventory/group_vars/all.yml @@ -0,0 +1,147 @@ +--- +openshift_openstack_clusterid: "openshift" +openshift_openstack_public_dns_domain: "example.com" +openshift_openstack_dns_nameservers: [] + +# # Used Hostnames +# # - set custom hostnames for roles by uncommenting corresponding lines +#openshift_openstack_master_hostname: "master" +#openshift_openstack_infra_hostname: "infra-node" +#openshift_openstack_node_hostname: "app-node" +#openshift_openstack_lb_hostname: "lb" +#openshift_openstack_etcd_hostname: "etcd" +#openshift_openstack_dns_hostname: "dns" + +openshift_openstack_keypair_name: "openshift" +openshift_openstack_external_network_name: "public" +#openshift_openstack_private_network_name: "openshift-ansible-{{ openshift_openstack_stack_name }}-net" +# # A dedicated Neutron network name for containers data network +# # Configures the data network to be separated from openshift_openstack_private_network_name +# # NOTE: this is only supported with Flannel SDN yet +#openstack_private_data_network_name: "openshift-ansible-{{ openshift_openstack_stack_name }}-data-net" + +## If you want to use a provider network, set its name here. +## NOTE: the `openshift_openstack_external_network_name` and +## `openshift_openstack_private_network_name` options will be ignored when using a +## provider network. +#openshift_openstack_provider_network_name: "provider" + +# # Used Images +# # - set specific images for roles by uncommenting corresponding lines +# # - note: do not remove openshift_openstack_default_image_name definition +#openshift_openstack_master_image_name: "centos7" +#openshift_openstack_infra_image_name: "centos7" +#openshift_openstack_node_image_name: "centos7" +#openshift_openstack_lb_image_name: "centos7" +#openshift_openstack_etcd_image_name: "centos7" +#openshift_openstack_dns_image_name: "centos7" +openshift_openstack_default_image_name: "centos7" + +openshift_openstack_num_masters: 1 +openshift_openstack_num_infra: 1 +openshift_openstack_num_nodes: 2 + +# # Used Flavors +# # - set specific flavors for roles by uncommenting corresponding lines +# # - note: do note remove openshift_openstack_default_flavor definition +#openshift_openstack_master_flavor: "m1.medium" +#openshift_openstack_infra_flavor: "m1.medium" +#openshift_openstack_node_flavor: "m1.medium" +#openshift_openstack_lb_flavor: "m1.medium" +#openshift_openstack_etcd_flavor: "m1.medium" +#openshift_openstack_dns_flavor: "m1.medium" +openshift_openstack_default_flavor: "m1.medium" + +# # Numerical index of nodes to remove +# openshift_openstack_nodes_to_remove: [] + +# # Docker volume size +# # - set specific volume size for roles by uncommenting corresponding lines +# # - note: do not remove docker_default_volume_size definition +#openshift_openstack_docker_master_volume_size: "15" +#openshift_openstack_docker_infra_volume_size: "15" +#openshift_openstack_docker_node_volume_size: "15" +#openshift_openstack_docker_etcd_volume_size: "2" +#openshift_openstack_docker_dns_volume_size: "1" +#openshift_openstack_docker_lb_volume_size: "5" +openshift_openstack_docker_volume_size: "15" + +## Specify server group policies for 
master and infra nodes. Nova must be configured to +## enable these policies. 'anti-affinity' will ensure that each VM is launched on a +## different physical host. +#openshift_openstack_master_server_group_policies: [anti-affinity] +#openshift_openstack_infra_server_group_policies: [anti-affinity] + +## Create a Cinder volume and use it for the OpenShift registry. +## NOTE: the openstack credentials and hosted registry options must be set in OSEv3.yml! +#openshift_openstack_cinder_hosted_registry_name: cinder-registry +#openshift_openstack_cinder_hosted_registry_size_gb: 10 + +## Set up a filesystem on the cinder volume specified in `OSEv3.yaml`. +## You need to specify the file system and volume ID in OSEv3 via +## `openshift_hosted_registry_storage_openstack_filesystem` and +## `openshift_hosted_registry_storage_openstack_volumeID`. +## WARNING: This will delete any data on the volume! +#openshift_openstack_prepare_and_format_registry_volume: False + +openshift_openstack_subnet_prefix: "192.168.99" + +## Red Hat subscription defaults to false which means we will not attempt to +## subscribe the nodes +#rhsm_register: False + +# # Using Red Hat Satellite: +#rhsm_register: True +#rhsm_satellite: 'sat-6.example.com' +#rhsm_org: 'OPENSHIFT_ORG' +#rhsm_activationkey: '<activation-key>' + +# # Or using RHN username, password and optionally pool: +#rhsm_register: True +#rhsm_username: '<username>' +#rhsm_password: '<password>' +#rhsm_pool: '<pool id>' + +#rhsm_repos: +# - "rhel-7-server-rpms" +# - "rhel-7-server-ose-3.5-rpms" +# - "rhel-7-server-extras-rpms" +# - "rhel-7-fast-datapath-rpms" + + +# # Roll-your-own DNS +#openshift_openstack_num_dns: 0 +#openshift_openstack_external_nsupdate_keys: +# public: +# key_secret: 'SKqKNdpfk7llKxZ57bbxUnUDobaaJp9t8CjXLJPl+fRI5mPcSBuxTAyvJPa6Y9R7vUg9DwCy/6WTpgLNqnV4Hg==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.1' +# private: +# key_secret: 'kVE2bVTgZjrdJipxPhID8BEZmbHD8cExlVPR+zbFpW6la8kL5wpXiwOh8q5AAosXQI5t95UXwq3Inx8QT58duw==' +# key_algorithm: 'hmac-md5' +# server: '192.168.1.2' + +# # Customize DNS server security options +#named_public_recursion: 'no' +#named_private_recursion: 'yes' + + +# NOTE(shadower): Do not change this value. The Ansible user is currently +# hardcoded to `openshift`. +ansible_user: openshift + +# # Use a single security group for a cluster (default: false) +#openshift_openstack_flat_secgrp: false + +# If you want to use the VM storage instead of Cinder volumes, set this to `true`. +# NOTE: this is for testing only! Your data will be gone once the VM disappears! +# openshift_openstack_ephemeral_volumes: false + +# # OpenShift node labels +# # - in order to customise node labels for app and/or infra group, set the +# # openshift_openstack_cluster_node_labels variable +#openshift_openstack_cluster_node_labels: +# app: +# region: primary +# infra: +# region: infra diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py new file mode 100755 index 000000000..47c56d94d --- /dev/null +++ b/playbooks/openstack/sample-inventory/inventory.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +""" +This is an Ansible dynamic inventory for OpenStack. + +It requires your OpenStack credentials to be set in clouds.yaml or your shell +environment. 
+ +""" + +from __future__ import print_function + +import json + +import shade + + +def build_inventory(): + '''Build the dynamic inventory.''' + cloud = shade.openstack_cloud() + + inventory = {} + + # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER` + # environment variable. + cluster_hosts = [ + server for server in cloud.list_servers() + if 'metadata' in server and 'clusterid' in server.metadata] + + masters = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'master'] + + etcd = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'etcd'] + if not etcd: + etcd = masters + + infra_hosts = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'node' and + server.metadata['sub-host-type'] == 'infra'] + + app = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'node' and + server.metadata['sub-host-type'] == 'app'] + + nodes = list(set(masters + infra_hosts + app)) + + dns = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'dns'] + + load_balancers = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'lb'] + + osev3 = list(set(nodes + etcd + load_balancers)) + + inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]} + inventory['OSEv3'] = {'hosts': osev3} + inventory['masters'] = {'hosts': masters} + inventory['etcd'] = {'hosts': etcd} + inventory['nodes'] = {'hosts': nodes} + inventory['infra_hosts'] = {'hosts': infra_hosts} + inventory['app'] = {'hosts': app} + inventory['dns'] = {'hosts': dns} + inventory['lb'] = {'hosts': load_balancers} + + for server in cluster_hosts: + if 'group' in server.metadata: + group = server.metadata.group + if group not in inventory: + inventory[group] = {'hosts': []} + inventory[group]['hosts'].append(server.name) + + inventory['_meta'] = {'hostvars': {}} + + for server in cluster_hosts: + ssh_ip_address = server.public_v4 or server.private_v4 + hostvars = { + 'ansible_host': ssh_ip_address + } + + public_v4 = server.public_v4 or server.private_v4 + if public_v4: + hostvars['public_v4'] = public_v4 + # TODO(shadower): what about multiple networks? + if server.private_v4: + hostvars['private_v4'] = server.private_v4 + + node_labels = server.metadata.get('node_labels') + if node_labels: + hostvars['openshift_node_labels'] = node_labels + + inventory['_meta']['hostvars'][server.name] = hostvars + return inventory + + +if __name__ == '__main__': + print(json.dumps(build_inventory(), indent=4, sort_keys=True)) |