-rw-r--r-- | .tito/packages/openshift-ansible | 2
-rw-r--r-- | openshift-ansible.spec | 16
-rw-r--r-- | playbooks/aws/openshift-cluster/uninstall_elb.yml | 9
-rw-r--r-- | playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 3
-rw-r--r-- | roles/openshift_aws/tasks/elb.yml | 24
-rw-r--r-- | roles/openshift_aws/tasks/elb_single.yml | 34
-rw-r--r-- | roles/openshift_aws/tasks/iam_cert.yml | 9
-rw-r--r-- | roles/openshift_aws/tasks/uninstall_elb.yml | 11
-rw-r--r-- | roles/openshift_aws/tasks/uninstall_iam_cert.yml | 25
-rw-r--r-- | roles/openshift_aws/tasks/vpc_and_subnet_id.yml | 8
-rw-r--r-- | roles/openshift_health_checker/openshift_checks/ovs_version.py | 2
-rw-r--r-- | roles/openshift_health_checker/openshift_checks/package_version.py | 2
-rw-r--r-- | roles/openshift_provisioners/defaults/main.yaml | 2
-rw-r--r-- | roles/openshift_storage_glusterfs/README.md | 22
14 files changed, 124 insertions, 45 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index bf76a3913..1266921a6 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.39.0 ./
+3.9.0-0.41.0 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 1ec707543..ae0104b27 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
 Name:           openshift-ansible
 Version:        3.9.0
-Release:        0.39.0%{?dist}
+Release:        0.41.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -201,6 +201,20 @@ Atomic OpenShift Utilities includes
 
 %changelog
+* Wed Feb 07 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.41.0
+- Allow OVS 2.7 in OCP 3.10 (sdodson@redhat.com)
+- GlusterFS: Minor documentation update (jarrpa@redhat.com)
+- Make sure to include upgrade_pre when upgrading master nodes
+  (sdodson@redhat.com)
+
+* Wed Feb 07 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.40.0
+- health checks: tolerate ovs 2.9 (lmeyer@redhat.com)
+- Fix docker rpm upgrade install task wording (mgugino@redhat.com)
+- Initial support for 3.10 (sdodson@redhat.com)
+- add deprovisioning for ELB (and IAM certs) (jdiaz@redhat.com)
+- [6632] fix indentation of terminationGracePeriodSeconds var
+  (jsanda@redhat.com)
+
 * Tue Feb 06 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.39.0
 - Update code to not fail when rc != 0 (kwoodson@redhat.com)
 - Upgrades: pass openshift_manage_node_is_master to master nodes during upgrade
diff --git a/playbooks/aws/openshift-cluster/uninstall_elb.yml b/playbooks/aws/openshift-cluster/uninstall_elb.yml
new file mode 100644
index 000000000..c1b724f0c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_elb.yml
@@ -0,0 +1,9 @@
+---
+- name: Delete elb
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: deprovision elb
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_elb.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 40e245d75..3c0b72832 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -311,6 +311,9 @@
   post_tasks:
   - import_role:
       name: openshift_node
+      tasks_from: upgrade_pre.yml
+  - import_role:
+      name: openshift_node
       tasks_from: upgrade.yml
   - import_role:
       name: openshift_manage_node
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index d8257cf31..3eb7b73b3 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -2,26 +2,8 @@
 - name: "dump the elb listeners for {{ l_elb_dict_item.key }}"
   debug:
     msg: "{{ l_elb_dict_item.value }}"
+    verbosity: 1
 
-- name: "Create ELB {{ l_elb_dict_item.key }}"
-  ec2_elb_lb:
-    name: "{{ item.value.name }}"
-    state: present
-    cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}"
-    security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}"
-    idle_timeout: "{{ item.value.idle_timout }}"
-    region: "{{ openshift_aws_region }}"
-    subnets:
-    - "{{ subnetout.subnets[0].id }}"
-    health_check: "{{ item.value.health_check }}"
-    listeners: "{{ item.value.listeners }}"
-    scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}"
-    tags: "{{ item.value.tags }}"
-    wait: True
-  register: new_elb
+- name: Create ELB(s)
+  include_tasks: elb_single.yml
+  with_dict: "{{ l_elb_dict_item.value }}"
-
-- debug:
-    msg: "{{ item }}"
-  with_items:
-  - "{{ new_elb }}"
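The elb.yml change above fans each entry of the ELB dict out to the new per-item task file instead of looping one task. As a minimal sketch of that include-per-item pattern (file and variable names here are illustrative, not from this commit):

```yaml
# fan_out.yml -- iterate a dict, delegating each entry to a task file
- name: Create one resource per dict entry
  include_tasks: per_item.yml          # hypothetical file name
  with_dict: "{{ my_resource_dict }}"  # hypothetical variable

# per_item.yml -- each inclusion sees the current entry as `item`,
# so retry and failure handling can live alongside the per-item work
- name: act on {{ item.key }}
  debug:
    msg: "{{ item.value }}"
```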
diff --git a/roles/openshift_aws/tasks/elb_single.yml b/roles/openshift_aws/tasks/elb_single.yml
new file mode 100644
index 000000000..864757549
--- /dev/null
+++ b/roles/openshift_aws/tasks/elb_single.yml
@@ -0,0 +1,34 @@
+---
+- name: "dump the elb listeners for {{ item.key }}"
+  debug:
+    msg: "{{ item.value }}"
+    verbosity: 1
+
+- name: "Create ELB {{ item.value.name }}"
+  ec2_elb_lb:
+    name: "{{ item.value.name }}"
+    state: present
+    cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}"
+    security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}"
+    idle_timeout: "{{ item.value.idle_timout }}"
+    region: "{{ openshift_aws_region }}"
+    subnets:
+    - "{{ subnetout.subnets[0].id }}"
+    health_check: "{{ item.value.health_check }}"
+    listeners: "{{ item.value.listeners }}"
+    scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}"
+    tags: "{{ item.value.tags }}"
+    wait: True
+  register: new_elb
+  retries: 20
+  delay: 5
+  until: new_elb | succeeded
+  ignore_errors: yes
+
+- fail:
+    msg: "couldn't create ELB {{ item.value.name }}"
+  when: not new_elb | succeeded
+
+- debug:
+    msg: "{{ new_elb }}"
+    verbosity: 1
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
index f74a62b8b..42d7d951c 100644
--- a/roles/openshift_aws/tasks/iam_cert.yml
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -18,7 +18,9 @@
   - openshift_aws_iam_cert_key_path != ''
   - openshift_aws_elb_cert_arn == ''
 
-- debug: msg="{{ elb_cert_chain }}"
+- debug:
+    msg: "{{ elb_cert_chain }}"
+    verbosity: 1
 
 - name: set_fact openshift_aws_elb_cert_arn
   set_fact:
@@ -28,8 +30,3 @@
   - openshift_aws_iam_cert_path != ''
   - openshift_aws_iam_cert_key_path != ''
   - openshift_aws_elb_cert_arn == ''
-
-- name: wait for cert to propagate
-  pause:
-    seconds: 5
-  when: elb_cert_chain.changed
diff --git a/roles/openshift_aws/tasks/uninstall_elb.yml b/roles/openshift_aws/tasks/uninstall_elb.yml
new file mode 100644
index 000000000..147e9a905
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_elb.yml
@@ -0,0 +1,11 @@
+---
+- name: delete elbs
+  ec2_elb_lb:
+    name: "{{ item }}"
+    region: "{{ openshift_aws_region }}"
+    state: absent
+  with_items: "{{ openshift_aws_elb_dict | json_query('*.*.name') | sum(start = []) }}"
+
+- when: openshift_aws_create_iam_cert | bool
+  name: delete the iam_cert for elb certificate
+  include_tasks: uninstall_iam_cert.yml
diff --git a/roles/openshift_aws/tasks/uninstall_iam_cert.yml b/roles/openshift_aws/tasks/uninstall_iam_cert.yml
new file mode 100644
index 000000000..7b47673ee
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_iam_cert.yml
@@ -0,0 +1,25 @@
+---
+- when:
+  - openshift_aws_create_iam_cert | bool
+  - openshift_aws_iam_cert_path != ''
+  - openshift_aws_iam_cert_key_path != ''
+  - openshift_aws_elb_cert_arn == ''
+  block:
+  - name: delete AWS IAM certificates
+    iam_cert23:
+      state: absent
+      name: "{{ openshift_aws_iam_cert_name }}"
+    register: elb_cert_chain
+    retries: 20
+    delay: 10
+    until: elb_cert_chain | succeeded
+    ignore_errors: yes
+
+  - debug:
+      var: elb_cert_chain
+      verbosity: 1
+
+  - name: check for iam cert error
+    fail:
+      msg: "Couldn't delete IAM cert {{ openshift_aws_iam_cert_name }}"
+    when: not elb_cert_chain | succeeded
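The `with_items` expression in uninstall_elb.yml above collects every ELB name out of the two-level dict: `json_query('*.*.name')` gathers `name` from each second-level value (one list per top-level key), and `sum(start=[])` concatenates those lists. A self-contained sketch with made-up data (the `json_query` filter needs the jmespath Python library; result order follows the dict):

```yaml
---
- hosts: localhost
  gather_facts: false
  vars:
    example_elb_dict:  # hypothetical stand-in for openshift_aws_elb_dict
      master:
        external: {name: demo-master-external}
        internal: {name: demo-master-internal}
      infra:
        external: {name: demo-infra-external}
  tasks:
    # Prints: ["demo-master-external", "demo-master-internal", "demo-infra-external"]
    - debug:
        msg: "{{ example_elb_dict | json_query('*.*.name') | sum(start=[]) }}"
```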
diff --git a/roles/openshift_aws/tasks/vpc_and_subnet_id.yml b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml
index 1b754f863..c2c345faf 100644
--- a/roles/openshift_aws/tasks/vpc_and_subnet_id.yml
+++ b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml
@@ -7,7 +7,9 @@
   register: vpcout
 
 - name: debug vcpout
-  debug: var=vpcout
+  debug:
+    var: vpcout
+    verbosity: 1
 
 - name: fetch the default subnet id
   ec2_vpc_subnet_facts:
@@ -18,4 +20,6 @@
   register: subnetout
 
 - name: debug subnetout
-  debug: var=subnetout
+  debug:
+    var: subnetout
+    verbosity: 1
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 4352778c2..fa398e5a9 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -22,7 +22,7 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
         (3, 7): ["2.6", "2.7", "2.8", "2.9"],
         (3, 8): ["2.6", "2.7", "2.8", "2.9"],
         (3, 9): ["2.6", "2.7", "2.8", "2.9"],
-        (3, 10): ["2.8", "2.9"],
+        (3, 10): ["2.7", "2.8", "2.9"],
     }
 
     def is_active(self):
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 3e8c1dac3..68022deca 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -18,7 +18,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
         (3, 7): ["2.6", "2.7", "2.8", "2.9"],
         (3, 8): ["2.6", "2.7", "2.8", "2.9"],
         (3, 9): ["2.6", "2.7", "2.8", "2.9"],
-        (3, 10): ["2.8", "2.9"],
+        (3, 10): ["2.7", "2.8", "2.9"],
     }
 
     openshift_to_docker_version = {
diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml
index 34ba78404..a54bf6de2 100644
--- a/roles/openshift_provisioners/defaults/main.yaml
+++ b/roles/openshift_provisioners/defaults/main.yaml
@@ -11,7 +11,7 @@ openshift_provisioners_project: openshift-infra
 
 openshift_provisioners_image_prefix_dict:
   origin: "docker.io/openshift/origin-"
-  openshift-enterprise: "registry.access.redhat.com/openshift3/ose-"
+  openshift-enterprise: "registry.access.redhat.com/openshift3/"
 
 openshift_provisioners_image_version_dict:
   origin: "latest"
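Several debug tasks in this commit gain `verbosity: 1`, which keeps their output hidden on a normal run and only prints it when ansible-playbook is invoked with at least one `-v`. A minimal sketch of the pattern:

```yaml
---
- hosts: localhost
  gather_facts: false
  tasks:
    # Suppressed by default; shown when the playbook runs with -v or higher
    - name: diagnostic output, quiet by default
      debug:
        msg: "only visible with: ansible-playbook site.yml -v"
        verbosity: 1
```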
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 70a89b0ba..65d38793c 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -81,8 +81,8 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
 | openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
 | openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
-| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
-| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_storageclass | True | Automatically create a GlusterFS StorageClass for this group
+| openshift_storage_glusterfs_storageclass_default | False | Sets the GlusterFS StorageClass for this group as cluster-wide default
 | openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
 | openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
 | openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
@@ -91,8 +91,8 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | openshift_storage_glusterfs_block_host_vol_create | True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
 | openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
 | openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
-| openshift_storage_glusterfs_block_storageclass | False | Automatically create a StorageClass for each Gluster Block cluster
-| openshift_storage_glusterfs_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_block_storageclass | False | Automatically create a StorageClass for each glusterblock cluster
+| openshift_storage_glusterfs_block_storageclass_default | False | Sets the glusterblock StorageClass for this group as cluster-wide default
 | openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
 | openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
 | openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster=s3 pod
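For reference, the StorageClass toggles documented above are ordinary inventory variables; a hypothetical group_vars snippet mirroring the documented defaults:

```yaml
# Hypothetical group_vars entries; values shown are the documented defaults.
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_storageclass_default: False
openshift_storage_glusterfs_block_storageclass: False
openshift_storage_glusterfs_block_storageclass_default: False
```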
@@ -118,8 +118,8 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
 | openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
 | openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
-| openshift_storage_glusterfs_heketi_fstab | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed
-| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
+| openshift_storage_glusterfs_heketi_fstab | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed
+| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
 
 Each role variable also has a corresponding variable to optionally configure a
 separate GlusterFS cluster for use as storage for an integrated Docker
@@ -131,11 +131,11 @@ are an exception:
 | Name | Default value | Description |
 |-----------------------------------------------------------------|-----------------------|-----------------------------------------|
 | openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_registry_block_storageclass | False | It is recommended to not create a StorageClass for Gluster Block clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry group from other Gluster groups
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for this group, so as to avoid noisy neighbor complications
+| openshift_storage_glusterfs_registry_storageclass_default | False | Separate from the above
+| openshift_storage_glusterfs_registry_block_storageclass | False | Only enable this for use by Logging and Metrics
+| openshift_storage_glusterfs_registry_block_storageclass_default | False | Separate from the above
 | openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
 | openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above |
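Likewise, the registry-specific variables form a parallel set; a hypothetical snippet following the table's recommendation to leave StorageClass creation off for registry storage:

```yaml
# Hypothetical group_vars entries for a registry-dedicated GlusterFS group,
# keeping StorageClass creation disabled as the table above recommends.
openshift_storage_glusterfs_registry_name: registry
openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_block_storageclass: False
```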