186 files changed, 4360 insertions, 1334 deletions
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index cdfd93725..2a4f80a36 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,16 +1,3 @@ -### <HTPASSWD_AUTH> - -We are aware of the current issues related to htpasswd_auth failures -Please downgrade to ansible 2.2.0.0 until a fix is released. -You can track the status of the bug fix in this issue: -https://github.com/openshift/openshift-ansible/issues/3111 -Please erase this <HTPASSWD_AUTH> section if it does not apply to you. - -Thanks - 2017-01-31 - -### </HTPASSWD_AUTH> - - #### Description Provide a brief description of your issue here. For example: diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 1cd7bde30..14df49d42 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.6.15-1 ./ +3.6.26-1 ./ @@ -1,10 +1,12 @@ # openshift-ansible RPM Build instructions + We use tito to make building and tracking revisions easy. For more information on tito, please see the [Tito home page](https://github.com/dgoodwin/tito "Tito home page"). -## Build openshift-ansible-bin +## Build openshift-ansible + - Change into openshift-ansible ``` cd openshift-ansible @@ -22,23 +24,3 @@ tito tag ``` tito build --rpm ``` - - -## Build openshift-ansible-inventory -- Change into openshift-ansible/inventory -``` -cd openshift-ansible/inventory -``` -- Build a test package (no tagging needed) -``` -tito build --test --rpm -``` -- Tag a new build (bumps version number and adds log entries) -``` -tito tag -``` -- Follow the on screen tito instructions to push the tags -- Build a new package based on the latest tag information -``` -tito build --rpm -``` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 50bb09470..a3ae3fd10 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -200,6 +200,9 @@ test results are posted to S3 buckets when complete. The test output of each job is also posted to the Pull Request as comments. +A trend of the time taken by merge jobs is available at +https://ci.openshift.redhat.com/jenkins/job/merge_pull_request_openshift_ansible/buildTimeTrend. + --- ## Appendix diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md index 42ac5635a..009a1d95c 100644 --- a/DEPLOYMENT_TYPES.md +++ b/DEPLOYMENT_TYPES.md @@ -1,22 +1,17 @@ -#Deployment Types +# Deployment Types -This module supports OpenShift Origin and OpenShift Enterprise Each deployment -type sets various defaults used throughout your environment. +This repository supports OpenShift Origin and OpenShift Container Platform. -The table below outlines the defaults per `deployment_type`. 
- -| deployment_type | origin | enterprise (< 3.1) | openshift-enterprise (>= 3.1) | -|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|----------------------------------| -| **openshift.common.service_type** (also used for package names) | origin | openshift | | -| **openshift.common.config_base** | /etc/origin | /etc/openshift | /etc/origin | -| **openshift.common.data_dir** | /var/lib/origin | /var/lib/openshift | /var/lib/origin | -| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | aos3/aos-${component}:${version} | -| **Image Streams** | centos | rhel + xpaas | rhel | - - -**NOTE** `enterprise` deployment type is used for OpenShift Enterprise version -3.0.x OpenShift Enterprise deployments utilizing version 3.1 and later will -make use of the new `openshift-enterprise` deployment type. Additional work to -migrate between the two will be forthcoming. +Various defaults used throughout the playbooks and roles in this repository are +set based on the deployment type configuration (usually defined in an Ansible +hosts file). +The table below outlines the defaults per `openshift_deployment_type`: +| openshift_deployment_type | origin | openshift-enterprise | +|-----------------------------------------------------------------|------------------------------------------|----------------------------------------| +| **openshift.common.service_type** (also used for package names) | origin | atomic-openshift | +| **openshift.common.config_base** | /etc/origin | /etc/origin | +| **openshift.common.data_dir** | /var/lib/origin | /var/lib/origin | +| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | +| **Image Streams** | centos | rhel | diff --git a/README_ANSIBLE_CONTAINER.md b/README_ANSIBLE_CONTAINER.md deleted file mode 100644 index 30c5f8503..000000000 --- a/README_ANSIBLE_CONTAINER.md +++ /dev/null @@ -1,15 +0,0 @@ -# Running ansible in a docker container -* Building ansible container: - - ```sh - git clone https://github.com/openshift/openshift-ansible.git - cd openshift-ansible - docker build --rm -t ansible . - ``` -* Create /etc/ansible directory on the host machine and copy inventory file (hosts) into it. -* Copy ssh public key of the host machine to master and nodes machines in the cluster. -* Running the ansible container: - - ```sh - docker run -it --rm --privileged --net=host -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible ansible - ``` diff --git a/README_CONTAINERIZED_INSTALLATION.md b/README_CONTAINERIZED_INSTALLATION.md index 5e013e809..c697783e3 100644 --- a/README_CONTAINERIZED_INSTALLATION.md +++ b/README_CONTAINERIZED_INSTALLATION.md @@ -38,7 +38,7 @@ and _/tmp_. Be mindful of this when passing in files to be processed by `oc` or ### Requisite Images -Based on your deployment_type the installer will make use of the following +Based on your `openshift_deployment_type` the installer will make use of the following images. 
Because you may make use of a private repository we've moved the configuration of docker additional, insecure, and blocked registries to the beginning of the installation process ensuring that these settings are applied diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md index 35e057af3..2499e01d4 100644 --- a/README_CONTAINER_IMAGE.md +++ b/README_CONTAINER_IMAGE.md @@ -38,4 +38,6 @@ Here is an example of how to run a containerized `openshift-ansible` playbook th -e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \ openshift/openshift-ansible -The [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples) provide additional information on how to use an image built from it like this one. +Further usage examples are available in the [examples directory](examples/). + +Additional usage information for images built from `playbook2image` like this one can be found in the [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples). diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 000000000..0e412244d --- /dev/null +++ b/examples/README.md @@ -0,0 +1,93 @@ +# openshift-ansible usage examples + +The primary use of `openshift-ansible` is to install, configure and upgrade OpenShift clusters. + +This is typically done by direct invocation of Ansible tools like `ansible-playbook`. This use case is covered in detail in the [OpenShift advanced installation documentation](https://docs.openshift.org/latest/install_config/install/advanced_install.html) + +For OpenShift Container Platform there's also an installation utility that wraps `openshift-ansible`. This usage case is covered in the [Quick Installation](https://docs.openshift.com/container-platform/latest/install_config/install/quick_install.html) section of the documentation. + +The usage examples below cover use cases other than install/configure/upgrade. + +## Container image + +The examples below run [openshift-ansible in a container](../README_CONTAINER_IMAGE.md) to perform certificate expiration checks on an OpenShift cluster from pods running on the cluster itself. + +You can find more details about the certificate expiration check roles and example playbooks in [the openshift_certificate_expiry role's README](../roles/openshift_certificate_expiry/README.md). + +### Job to upload certificate expiration reports + +The example `Job` in [certificate-check-upload.yaml](certificate-check-upload.yaml) executes a [Job](https://docs.openshift.org/latest/dev_guide/jobs.html) that checks the expiration dates of the internal certificates of the cluster and uploads HTML and JSON reports to `/etc/origin/certificate_expiration_report` in the masters. + +This example uses the [`easy-mode-upload.yaml`](../playbooks/certificate_expiry/easy-mode-upload.yaml) example playbook, which generates reports and uploads them to the masters. The playbook can be customized via environment variables to control the length of the warning period (`CERT_EXPIRY_WARN_DAYS`) and the location in the masters where the reports are uploaded (`COPY_TO_PATH`). 
+ +The job expects the inventory to be provided via the *hosts* key of a [ConfigMap](https://docs.openshift.org/latest/dev_guide/configmaps.html) named *inventory*, and the passwordless ssh key that allows connecting to the hosts to be availalbe as *ssh-privatekey* from a [Secret](https://docs.openshift.org/latest/dev_guide/secrets.html) named *sshkey*, so these are created first: + + oc new-project certcheck + oc create configmap inventory --from-file=hosts=/etc/ansible/hosts + oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa + +Note that `inventory`, `hosts`, `sshkey` and `ssh-privatekey` are referenced by name from the provided example Job definition. If you use different names for the objects/attributes you will have to adjust the Job accordingly. + +To create the Job: + + oc create -f examples/certificate-check-upload.yaml + +### Scheduled job for certificate expiration report upload + +**Note**: This example uses the [ScheduledJob](https://docs.openshift.com/container-platform/3.4/dev_guide/scheduled_jobs.html) object, which has been renamed to [CronJob](https://docs.openshift.org/latest/dev_guide/cron_jobs.html) upstream and is still a Technology Preview subject to further change. + +The example `ScheduledJob` in [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml) does the same as the `Job` example above, but it is scheduled to automatically run every first day of the month (see the `spec.schedule` value in the example). + +The job definition is the same and it expects the same configuration: we provide the inventory and ssh key via a ConfigMap and a Secret respectively: + + oc new-project certcheck + oc create configmap inventory --from-file=hosts=/etc/ansible/hosts + oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa + +And then we create the ScheduledJob: + + oc create -f examples/scheduled-certcheck-upload.yaml + +### Job and ScheduledJob to check certificates using volumes + +There are two additional examples: + + - A `Job` [certificate-check-volume.yaml](certificate-check-volume.yaml) + - A `ScheduledJob` [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml) + +These perform the same work as the two examples above, but instead of uploading the generated reports to the masters they store them in a custom path within the container that is expected to be backed by a [PersistentVolumeClaim](https://docs.openshift.org/latest/dev_guide/persistent_volumes.html), so that the reports are actually written to storage external to the container. + +These examples assume that there is an existing `PersistentVolumeClaim` called `certcheck-reports` and they use the [`html_and_json_timestamp.yaml`](../playbooks/certificate_expiry/html_and_json_timestamp.yaml) example playbook to write timestamped reports into it. + +You can later access the reports from another pod that mounts the same volume, or externally via direct access to the backend storage behind the matching `PersistentVolume`. 
+ +To run these examples we prepare the inventory and ssh keys as in the other examples: + + oc new-project certcheck + oc create configmap inventory --from-file=hosts=/etc/ansible/hosts + oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa + +Additionally we allocate a `PersistentVolumeClaim` to store the reports: + + oc create -f - <<PVC + --- + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: certcheck-reports + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + PVC + +With that we can run the `Job` once: + + oc create -f examples/certificate-check-volume.yaml + +or schedule it to run periodically as a `ScheduledJob`: + + oc create -f examples/scheduled-certcheck-volume.yaml + diff --git a/examples/certificate-check-upload.yaml b/examples/certificate-check-upload.yaml new file mode 100644 index 000000000..b10a0b614 --- /dev/null +++ b/examples/certificate-check-upload.yaml @@ -0,0 +1,47 @@ +# An example Job to run a certificate check of OpenShift's internal +# certificate status from within OpenShift. +# +# The generated reports are uploaded to a location in the master +# hosts, using the playbook 'easy-mode-upload.yaml'. +# +# This example uses the openshift/openshift-ansible container image. +# (see README_CONTAINER_IMAGE.md in the top level dir for more details). +# +# The following objects are xpected to be configured before the creation +# of this Job: +# - A ConfigMap named 'inventory' with a key named 'hosts' that +# contains the the Ansible inventory file +# - A Secret named 'sshkey' with a key named 'ssh-privatekey +# that contains the ssh key to connect to the hosts +# (see examples/README.md for more details) +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: certificate-check +spec: + containers: + - name: openshift-ansible + image: openshift/openshift-ansible + env: + - name: PLAYBOOK_FILE + value: playbooks/certificate_expiry/easy-mode-upload.yaml + - name: INVENTORY_FILE + value: /tmp/inventory/hosts # from configmap vol below + - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below + value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey + - name: CERT_EXPIRY_WARN_DAYS + value: "45" # must be a string, don't forget the quotes + volumeMounts: + - name: sshkey + mountPath: /opt/app-root/src/.ssh/id_rsa + - name: inventory + mountPath: /tmp/inventory + volumes: + - name: sshkey + secret: + secretName: sshkey + - name: inventory + configMap: + name: inventory + restartPolicy: Never diff --git a/examples/certificate-check-volume.yaml b/examples/certificate-check-volume.yaml new file mode 100644 index 000000000..c19dc1f88 --- /dev/null +++ b/examples/certificate-check-volume.yaml @@ -0,0 +1,54 @@ +# An example Job to run a certificate check of OpenShift's internal +# certificate status from within OpenShift. +# +# The generated reports are stored in a Persistent Volume using +# the playbook 'html_and_json_timestamp.yaml'. +# +# This example uses the openshift/openshift-ansible container image. +# (see README_CONTAINER_IMAGE.md in the top level dir for more details). 
+# +# The following objects are xpected to be configured before the creation +# of this Job: +# - A ConfigMap named 'inventory' with a key named 'hosts' that +# contains the the Ansible inventory file +# - A Secret named 'sshkey' with a key named 'ssh-privatekey +# that contains the ssh key to connect to the hosts +# - A PersistentVolumeClaim named 'certcheck-reports' where the +# generated reports are going to be stored +# (see examples/README.md for more details) +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: certificate-check +spec: + containers: + - name: openshift-ansible + image: openshift/openshift-ansible + env: + - name: PLAYBOOK_FILE + value: playbooks/certificate_expiry/html_and_json_timestamp.yaml + - name: INVENTORY_FILE + value: /tmp/inventory/hosts # from configmap vol below + - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below + value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey + - name: CERT_EXPIRY_WARN_DAYS + value: "45" # must be a string, don't forget the quotes + volumeMounts: + - name: sshkey + mountPath: /opt/app-root/src/.ssh/id_rsa + - name: inventory + mountPath: /tmp/inventory + - name: reports + mountPath: /var/lib/certcheck + volumes: + - name: sshkey + secret: + secretName: sshkey + - name: inventory + configMap: + name: inventory + - name: reports + persistentVolumeClaim: + claimName: certcheck-reports + restartPolicy: Never diff --git a/examples/scheduled-certcheck-upload.yaml b/examples/scheduled-certcheck-upload.yaml new file mode 100644 index 000000000..b0a97361b --- /dev/null +++ b/examples/scheduled-certcheck-upload.yaml @@ -0,0 +1,53 @@ +# An example ScheduledJob to run a regular check of OpenShift's internal +# certificate status. +# +# Each job will upload new reports to a directory in the master hosts +# +# The Job specification is the same as 'certificate-check-upload.yaml' +# and the expected pre-configuration is equivalent. +# See that Job example and examples/README.md for more details. +# +# NOTE: ScheduledJob has been renamed to CronJob in upstream k8s recently. At +# some point (OpenShift 3.6+) this will have to be renamed to "kind: CronJob" +# and once the API stabilizes the apiVersion will have to be updated too. +--- +apiVersion: batch/v2alpha1 +kind: ScheduledJob +metadata: + name: certificate-check + labels: + app: certcheck +spec: + schedule: "0 0 1 * *" # every 1st day of the month at midnight + jobTemplate: + metadata: + labels: + app: certcheck + spec: + template: + spec: + containers: + - name: openshift-ansible + image: openshift/openshift-ansible + env: + - name: PLAYBOOK_FILE + value: playbooks/certificate_expiry/easy-mode-upload.yaml + - name: INVENTORY_FILE + value: /tmp/inventory/hosts # from configmap vol below + - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below + value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey + - name: CERT_EXPIRY_WARN_DAYS + value: "45" # must be a string, don't forget the quotes + volumeMounts: + - name: sshkey + mountPath: /opt/app-root/src/.ssh/id_rsa + - name: inventory + mountPath: /tmp/inventory + volumes: + - name: sshkey + secret: + secretName: sshkey + - name: inventory + configMap: + name: inventory + restartPolicy: Never diff --git a/examples/scheduled-certcheck-volume.yaml b/examples/scheduled-certcheck-volume.yaml new file mode 100644 index 000000000..74cdc9e7f --- /dev/null +++ b/examples/scheduled-certcheck-volume.yaml @@ -0,0 +1,58 @@ +# An example ScheduledJob to run a regular check of OpenShift's internal +# certificate status. 
+# +# Each job will add a new pair of reports to the configured Persistent Volume +# +# The Job specification is the same as 'certificate-check-volume.yaml' +# and the expected pre-configuration is equivalent. +# See that Job example and examples/README.md for more details. +# +# NOTE: ScheduledJob has been renamed to CronJob in upstream k8s recently. At +# some point (OpenShift 3.6+) this will have to be renamed to "kind: CronJob" +# and once the API stabilizes the apiVersion will have to be updated too. +--- +apiVersion: batch/v2alpha1 +kind: ScheduledJob +metadata: + name: certificate-check + labels: + app: certcheck +spec: + schedule: "0 0 1 * *" # every 1st day of the month at midnight + jobTemplate: + metadata: + labels: + app: certcheck + spec: + template: + spec: + containers: + - name: openshift-ansible + image: openshift/openshift-ansible + env: + - name: PLAYBOOK_FILE + value: playbooks/certificate_expiry/html_and_json_timestamp.yaml + - name: INVENTORY_FILE + value: /tmp/inventory/hosts # from configmap vol below + - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below + value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey + - name: CERT_EXPIRY_WARN_DAYS + value: "45" # must be a string, don't forget the quotes + volumeMounts: + - name: sshkey + mountPath: /opt/app-root/src/.ssh/id_rsa + - name: inventory + mountPath: /tmp/inventory + - name: reports + mountPath: /var/lib/certcheck + volumes: + - name: sshkey + secret: + secretName: sshkey + - name: inventory + configMap: + name: inventory + - name: reports + persistentVolumeClaim: + claimName: certcheck-reports + restartPolicy: Never diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index b11fbc407..10c8600ba 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # vim: expandtab:tabstop=4:shiftwidth=4 +# pylint: disable=too-many-lines """ Custom filters for use in openshift-ansible """ @@ -128,34 +129,57 @@ def oo_merge_hostvars(hostvars, variables, inventory_hostname): return merged_hostvars -def oo_collect(data, attribute=None, filters=None): +def oo_collect(data_list, attribute=None, filters=None): """ This takes a list of dict and collects all attributes specified into a list. If filter is specified then we will include all items that match _ALL_ of filters. If a dict entry is missing the key in a filter it will be excluded from the match. - Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return - {'a':2, 'z': 'z'}, # True, return - {'a':3, 'z': 'z'}, # True, return - {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z'] - ] + Ex: data_list = [ {'a':1, 'b':5, 'z': 'z'}, # True, return + {'a':2, 'z': 'z'}, # True, return + {'a':3, 'z': 'z'}, # True, return + {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z'] + ] attribute = 'a' filters = {'z': 'z'} returns [1, 2, 3] + + This also deals with lists of lists with dict as elements. 
+ Ex: data_list = [ + [ {'a':1, 'b':5, 'z': 'z'}, # True, return + {'a':2, 'b':6, 'z': 'z'} # True, return + ], + [ {'a':3, 'z': 'z'}, # True, return + {'a':4, 'z': 'b'} # FAILED, obj['z'] != obj['z'] + ], + {'a':5, 'z': 'z'}, # True, return + ] + attribute = 'a' + filters = {'z': 'z'} + returns [1, 2, 3, 5] """ - if not isinstance(data, list): - raise errors.AnsibleFilterError("|failed expects to filter on a List") + if not isinstance(data_list, list): + raise errors.AnsibleFilterError("oo_collect expects to filter on a List") if not attribute: - raise errors.AnsibleFilterError("|failed expects attribute to be set") + raise errors.AnsibleFilterError("oo_collect expects attribute to be set") + + data = [] + retval = [] + + for item in data_list: + if isinstance(item, list): + retval.extend(oo_collect(item, attribute, filters)) + else: + data.append(item) if filters is not None: if not isinstance(filters, dict): - raise errors.AnsibleFilterError("|failed expects filter to be a" - " dict") - retval = [get_attr(d, attribute) for d in data if ( - all([d.get(key, None) == filters[key] for key in filters]))] + raise errors.AnsibleFilterError( + "oo_collect expects filter to be a dict") + retval.extend([get_attr(d, attribute) for d in data if ( + all([d.get(key, None) == filters[key] for key in filters]))]) else: - retval = [get_attr(d, attribute) for d in data] + retval.extend([get_attr(d, attribute) for d in data]) retval = [val for val in retval if val is not None] @@ -749,6 +773,23 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): fsType=filesystem, volumeID=volume_id))) persistent_volumes.append(persistent_volume) + elif kind == 'glusterfs': + volume = params['volume']['name'] + size = params['volume']['size'] + access_modes = params['access']['modes'] + endpoints = params['glusterfs']['endpoints'] + path = params['glusterfs']['path'] + read_only = params['glusterfs']['readOnly'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + access_modes=access_modes, + storage=dict( + glusterfs=dict( + endpoints=endpoints, + path=path, + readOnly=read_only))) + persistent_volumes.append(persistent_volume) elif not (kind == 'object' or kind == 'dynamic'): msg = "|failed invalid storage kind '{0}' for component '{1}'".format( kind, diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 27914e60a..f70971537 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -22,7 +22,7 @@ ansible_ssh_user=root # Debug level for all OpenShift components (Defaults to 2) debug_level=2 -# deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise +# Specify the deployment type. Valid values are origin and openshift-enterprise. openshift_deployment_type=origin # Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we @@ -426,6 +426,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 #openshift_hosted_registry_storage_volume_size=10Gi # +# Native GlusterFS Registry Storage +#openshift_hosted_registry_storage_kind=glusterfs +# # AWS S3 # S3 bucket must already exist. 
#openshift_hosted_registry_storage_kind=object diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index f0269bff8..f5e0de1b0 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -22,7 +22,7 @@ ansible_ssh_user=root # Debug level for all OpenShift components (Defaults to 2) debug_level=2 -# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise +# Specify the deployment type. Valid values are origin and openshift-enterprise. openshift_deployment_type=openshift-enterprise # Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we @@ -426,6 +426,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 #openshift_hosted_registry_storage_volume_size=10Gi # +# Native GlusterFS Registry Storage +#openshift_hosted_registry_storage_kind=glusterfs +# # AWS S3 # # S3 bucket must already exist. diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 992fe63c2..61b8f9f37 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -9,7 +9,7 @@ %global __requires_exclude ^/usr/bin/ansible-playbook$ Name: openshift-ansible -Version: 3.6.15 +Version: 3.6.26 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -270,6 +270,108 @@ Atomic OpenShift Utilities includes %changelog +* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.26-1 +- Correct role dependencies (rteague@redhat.com) +- Allow for GlusterFS to provide registry storage (jarrpa@redhat.com) +- Integrate GlusterFS into OpenShift installation (jarrpa@redhat.com) +- GlusterFS playbook and role (jarrpa@redhat.com) + +* Mon Apr 17 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.25-1 +- Fix default image tag for enterprise (sdodson@redhat.com) +- Cast etcd_debug to a boolean (skuznets@redhat.com) + +* Fri Apr 14 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.24-1 +- tox tests: pin test requirement versions (lmeyer@redhat.com) +- This is no longer a widely encountered issue (sdodson@redhat.com) +- Standardize use of byo and common for network_manager.yml + (rteague@redhat.com) +- Disable swap space on nodes at install and upgrade (rteague@redhat.com) +- Do not check package version on non-master/node (rhcarvalho@gmail.com) + +* Thu Apr 13 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.23-1 +- Refactor initialize groups tasks (rteague@redhat.com) +- tox tests: pin test requirement versions (lmeyer@redhat.com) +- skip PackageAvailability check if not yum (jvallejo@redhat.com) +- Document service_type for openshift-enterprise (rhcarvalho@gmail.com) +- Remove references to outdated deployment_type (rhcarvalho@gmail.com) +- Update deployment_type documentation (rhcarvalho@gmail.com) +- Document merge time trends page (rhcarvalho@gmail.com) +- Remove outdated documentation (rhcarvalho@gmail.com) +- Remove outdated build instructions (rhcarvalho@gmail.com) +- openshift_sanitize_inventory: disallow conflicting deployment types + (lmeyer@redhat.com) +- Refactor docker upgrade playbooks (rteague@redhat.com) +- Changed Hawkular Metrics secrets to use a format similar to the one + automatically generated by OpenShift (juraci@kroehling.de) + +* Wed Apr 12 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.22-1 +- Fixed spelling mistake. 
(kwoodson@redhat.com) +- Remove unnecessary folder refs (rteague@redhat.com) +- Switching commands for modules during upgrade of router and registry. + (kwoodson@redhat.com) +- Fixing a compatibility issue with python 2.7 to 3.5 when reading from + subprocess. (kwoodson@redhat.com) +- Refactor use of initialize_oo_option_facts.yml (rteague@redhat.com) +- preflight checks: refactor and fix aos_version (lmeyer@redhat.com) +- Add external provisioners playbook starting with aws efs (mawong@redhat.com) + +* Tue Apr 11 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.21-1 +- Adding a query for the existing docker-registry route. (kwoodson@redhat.com) +- Removing docker-registry route from cockpit-ui. (kwoodson@redhat.com) + +* Fri Apr 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.20-1 +- Fixed a bug when oc command fails. (kwoodson@redhat.com) +- openshift_sanitize_inventory: validate release (lmeyer@redhat.com) + +* Fri Apr 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.19-1 +- Add example scheduled certificate check (pep@redhat.com) +- Switch from ignoring to passing on checks (rteague@redhat.com) +- Add tests for action plugin (rhcarvalho@gmail.com) +- Remove unnecessary code (rhcarvalho@gmail.com) +- Make resolve_checks more strict (rhcarvalho@gmail.com) + +* Fri Apr 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.18-1 +- master-api: add mount for /var/log (gscrivan@redhat.com) +- master: add mount for /var/log (gscrivan@redhat.com) +- unexclude excluder if it is to be upgraded and already installed + (jchaloup@redhat.com) +- Bump calico policy controller (djosborne10@gmail.com) +- Fixed a string format and a lint space issue (kwoodson@redhat.com) +- Fixed name and selector to be mutually exclusive (kwoodson@redhat.com) +- Adding ability to delete by selector. (kwoodson@redhat.com) +- Adding delete with selector support. (kwoodson@redhat.com) + +* Thu Apr 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.17-1 +- Adding signed router cert and fixing server_cert bug. (kwoodson@redhat.com) + +* Wed Apr 05 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.16-1 +- Removing test coverage for shared code. (kwoodson@redhat.com) +- Port 10255 unnecessary. 
Removing all instances (ccallega@redhat.com) +- oo_filters: Disable pylint too-many-lines test (jarrpa@redhat.com) +- oo_collect: Allow list elements to be lists of dict (jarrpa@redhat.com) +- oc_label: handle case where _get() returns no results (jarrpa@redhat.com) +- Addressing py27-yamllint (esauer@redhat.com) +- Add 'docker-registry.default.svc' to cert-redeploy too (sdodson@redhat.com) +- Support unicode output when dumping yaml (rteague@redhat.com) +- Add docker-registry.default.svc short name to registry service signing + (sdodson@redhat.com) +- oc_configmap: Add missing check for name (jarrpa@redhat.com) +- oo_collect: Update comments to show source of failure (jarrpa@redhat.com) +- openshift_facts: Allow examples_content_version to be set to v1.6 + (jarrpa@redhat.com) +- Restart polkitd to workaround a bug in polkitd (sdodson@redhat.com) +- Add names to openshift_image_tag asserts (smilner@redhat.com) +- doc: Remove atomic-openshift deployment type (smilner@redhat.com) +- openshift_version now requires prepended version formats (smilner@redhat.com) +- Warn if openshift_image_tag is defined by hand for package installs + (smilner@redhat.com) +- Verify openshift_image_tag is valid during openshift_version main + (smilner@redhat.com) +- Add openshift_version fact fallback debug messages (smilner@redhat.com) +- cleanup: when in openshift_version tasks are multiline (smilner@redhat.com) +- Compatibility updates to openshift_logging role for ansible 2.2.2.0+ + (esauer@redhat.com) + * Tue Apr 04 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.15-1 - Document etcd_ca_default_days in example inventories. (abutcher@redhat.com) - Fixed a bug. Ansible requires a msg param when module.fail_json. diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index cb464cf0d..268a65415 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -13,6 +13,8 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}" g_nfs_hosts: "{{ groups.nfs | default([]) }}" +g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}" + g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts) | union(g_new_node_hosts)| union(g_new_master_hosts) diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 4db0720d0..acf5469bf 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml index 32f9ebfd3..9ce8f0d3c 100644 --- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml @@ -1,26 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: 
../../byo/openshift-cluster/cluster_hosts.yml +- include: initialize_groups.yml - include: ../../common/openshift-cluster/enable_dnsmasq.yml diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml new file mode 100644 index 000000000..2785dcc3b --- /dev/null +++ b/playbooks/byo/openshift-cluster/initialize_groups.yml @@ -0,0 +1,24 @@ +--- +- name: Create initial host groups for localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tags: + - always + tasks: + - include_vars: cluster_hosts.yml + - name: Evaluate group l_oo_all_hosts + add_host: + name: "{{ item }}" + groups: l_oo_all_hosts + with_items: "{{ g_all_hosts | default([]) }}" + changed_when: no + +- name: Create initial host groups for all hosts + hosts: l_oo_all_hosts + gather_facts: no + tags: + - always + tasks: + - include_vars: cluster_hosts.yml diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml index f8eebe898..76f165c6d 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/byo/openshift-cluster/openshift-logging.yml @@ -4,29 +4,7 @@ # Hosted logging on. See inventory/byo/hosts.*.example for the # currently supported method. # -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +- include: initialize_groups.yml - include: ../../common/openshift-cluster/openshift_logging.yml vars: diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index ad24b9ad0..012ce69ec 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml index ee49364fa..8516baee8 100644 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml index 9c8248c4e..566e8b261 100644 --- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml index 1695111d0..42777e5e6 100644 --- 
a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml index e44e95467..3b33e0d6f 100644 --- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml +++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml index 53ee68db9..30feabab3 100644 --- a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml index f8c267569..2630fb234 100644 --- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh deleted file mode 120000 index d5d864b63..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh +++ /dev/null @@ -1 +0,0 @@ -../../../../common/openshift-cluster/upgrades/files/nuke_images.sh
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml index 5feb33be4..7f31e26e1 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -1,36 +1,5 @@ --- # Playbook to upgrade Docker to the max allowable version for an OpenShift cluster. -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False +- include: ../../initialize_groups.yml -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../cluster_hosts.yml - -- include: ../../../../common/openshift-cluster/evaluate_groups.yml - vars: - # Do not allow adding hosts during upgrade. - g_new_master_hosts: [] - g_new_node_hosts: [] - openshift_cluster_id: "{{ cluster_id | default('default') }}" - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - -- include: docker_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml index 106dcc12d..8005a17a3 100644 --- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml +++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml @@ -1,26 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../cluster_hosts.yml +- include: ../initialize_groups.yml - include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index d268850d8..690b663f4 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -2,6 +2,8 @@ # # Full Control Plane + Nodes Upgrade # +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index d11e51640..fca2c04f3 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -11,6 +11,8 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
# +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index 5a0f143ac..d171ac3cd 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -4,6 +4,8 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml index 25d8cd2ba..217163802 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -2,6 +2,8 @@ # # Full Control Plane + Nodes Upgrade # +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index d52f3c111..d21c195bf 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -11,6 +11,8 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml index 07c734a40..7bb66611c 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -4,6 +4,8 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml index 86f5a36ca..f0900e04e 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -2,6 +2,8 @@ # # Full Control Plane + Nodes Upgrade # +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index a2f1cd2b1..e8d834a04 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -11,6 +11,8 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
# +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml index f858de3d5..c2a4debc8 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -4,6 +4,8 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml index 900bbc8d8..763e79e01 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -2,6 +2,8 @@ # # Full Control Plane + Nodes Upgrade # +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 5bd0f7ac5..7a1377be2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -11,6 +11,8 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 96d89dbdd..065746493 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -4,6 +4,8 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. 
# +- include: ../../initialize_groups.yml + - include: ../../../../common/openshift-cluster/upgrades/init.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml index 19403116d..d43533641 100644 --- a/playbooks/byo/openshift-etcd/restart.yml +++ b/playbooks/byo/openshift-etcd/restart.yml @@ -1,4 +1,8 @@ --- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 21e4cff1b..7988863f3 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -1,4 +1,8 @@ --- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index a5705e990..8aa07a664 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -1,27 +1,5 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +- include: ../openshift-cluster/initialize_groups.yml - include: ../../common/openshift-master/scaleup.yml vars: diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml index 9bb3ea17f..b23692237 100644 --- a/playbooks/byo/openshift-node/network_manager.yml +++ b/playbooks/byo/openshift-node/network_manager.yml @@ -1,42 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False +- include: ../openshift-cluster/initialize_groups.yml -- name: Install and configure NetworkManager - hosts: l_oo_all_hosts - become: yes - tasks: - - name: install NetworkManager - package: - name: 'NetworkManager' - state: present - - - name: configure NetworkManager - lineinfile: - dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" - regexp: '^{{ item }}=' - line: '{{ item }}=yes' - state: present - create: yes - with_items: - - 'USE_PEERDNS' - - 'NM_CONTROLLED' - - - name: enable and start NetworkManager - service: - name: 'NetworkManager' - state: started - enabled: yes +- include: ../../common/openshift-node/network_manager.yml diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml index 6861625b9..92665d71d 100644 --- a/playbooks/byo/openshift-node/restart.yml +++ b/playbooks/byo/openshift-node/restart.yml @@ -1,4 +1,8 @@ --- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + - include: 
../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index 88d236b53..c6965fd6f 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -1,27 +1,5 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +- include: ../openshift-cluster/initialize_groups.yml - include: ../../common/openshift-node/scaleup.yml vars: diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index a21b6a0a5..3b10323d6 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -1,4 +1,8 @@ --- +- include: openshift-cluster/initialize_groups.yml + tags: + - always + - include: ../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 8c6d77024..777743def 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -1,4 +1,8 @@ --- +- include: openshift-cluster/initialize_groups.yml + tags: + - always + - include: ../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/certificate_expiry/easy-mode-upload.yaml b/playbooks/certificate_expiry/easy-mode-upload.yaml new file mode 100644 index 000000000..378d1f154 --- /dev/null +++ b/playbooks/certificate_expiry/easy-mode-upload.yaml @@ -0,0 +1,40 @@ +# This example generates HTML and JSON reports and +# +# Copies of the generated HTML and JSON reports are uploaded to the masters, +# which is particularly useful when this playbook is run from a container. 
+# +# All certificates (healthy or not) are included in the results +# +# Optional environment variables to alter the behaviour of the playbook: +# CERT_EXPIRY_WARN_DAYS: Length of the warning window in days (45) +# COPY_TO_PATH: path to copy reports to in the masters (/etc/origin/certificate_expiration_report) +--- +- name: Generate certificate expiration reports + hosts: nodes:masters:etcd + gather_facts: no + vars: + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_show_all: yes + openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}" + roles: + - role: openshift_certificate_expiry + +- name: Upload reports to master + hosts: masters + gather_facts: no + vars: + destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + tasks: + - name: Ensure that the target directory exists + file: + path: "{{ destination_path }}" + state: directory + - name: Copy the reports + copy: + dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}" + src: "/tmp/{{ item }}" + with_items: + - "cert-expiry-report.html" + - "cert-expiry-report.json" diff --git a/playbooks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/certificate_expiry/html_and_json_timestamp.yaml new file mode 100644 index 000000000..2189455b7 --- /dev/null +++ b/playbooks/certificate_expiry/html_and_json_timestamp.yaml @@ -0,0 +1,16 @@ +--- +# Generate timestamped HTML and JSON reports in /var/lib/certcheck + +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_show_all: yes + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html" + openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json" + roles: + - role: openshift_certificate_expiry diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 1b967b7f1..239bb211b 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,30 +1,7 @@ --- -- name: Set oo_option facts - hosts: oo_all_hosts +- include: initialize_oo_option_facts.yml tags: - always - tasks: - - set_fact: - openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" - when: openshift_docker_additional_registries is not defined - - set_fact: - openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}" - when: openshift_docker_insecure_registries is not defined - - set_fact: - openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" - when: openshift_docker_blocked_registries is not defined - - set_fact: - openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" - when: openshift_docker_options is not defined - - set_fact: - openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" - when: openshift_docker_log_driver is not defined - - set_fact: - openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" - when: openshift_docker_log_options is not 
defined - - set_fact: - openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" - when: openshift_docker_selinux_enabled is not defined - include: disable_excluder.yml tags: @@ -54,6 +31,10 @@ tags: - node +- include: ../openshift-glusterfs/config.yml + tags: + - glusterfs + - include: openshift_hosted.yml tags: - hosted diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 45a4875a3..6aac70f63 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -29,6 +29,10 @@ msg: The nfs group must be limited to one host when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}" + - fail: + msg: This playbook requires g_glusterfs_hosts to be set + when: "{{ g_glusterfs_hosts is not defined }}" + - name: Evaluate oo_all_hosts add_host: name: "{{ item }}" @@ -119,3 +123,12 @@ ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_nfs_hosts | default([]) }}" changed_when: no + + - name: Evaluate oo_glusterfs_to_config + add_host: + name: "{{ item }}" + groups: oo_glusterfs_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_glusterfs_hosts | default([]) }}" + changed_when: no diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml new file mode 100644 index 000000000..ac3c702a0 --- /dev/null +++ b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml @@ -0,0 +1,27 @@ +--- +- name: Set oo_option facts + hosts: oo_all_hosts + tags: + - always + tasks: + - set_fact: + openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" + when: openshift_docker_additional_registries is not defined + - set_fact: + openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}" + when: openshift_docker_insecure_registries is not defined + - set_fact: + openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" + when: openshift_docker_blocked_registries is not defined + - set_fact: + openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" + when: openshift_docker_options is not defined + - set_fact: + openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" + when: openshift_docker_log_driver is not defined + - set_fact: + openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" + when: openshift_docker_log_options is not defined + - set_fact: + openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" + when: openshift_docker_selinux_enabled is not defined diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml index d96a78c4c..57580406c 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/common/openshift-cluster/openshift_logging.yml @@ -1,11 +1,13 @@ --- +- include: evaluate_groups.yml + - name: OpenShift Aggregated Logging hosts: oo_first_master roles: - openshift_logging - name: Update Master configs - hosts: masters:!oo_first_master + hosts: oo_masters:!oo_first_master tasks: - block: - include_role: diff --git a/playbooks/common/openshift-cluster/openshift_provisioners.yml b/playbooks/common/openshift-cluster/openshift_provisioners.yml 
new file mode 100644 index 000000000..b1ca6f606 --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_provisioners.yml @@ -0,0 +1,5 @@ +--- +- name: OpenShift Provisioners + hosts: oo_first_master + roles: + - openshift_provisioners diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml index cbb4a2434..3b26abcc7 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml @@ -130,7 +130,7 @@ state: absent changed_when: false -- include: ../../../common/openshift-etcd/restart.yml +- include: ../openshift-etcd/restart.yml # Update master config when ca-bundle not referenced. Services will be # restarted below after new CA certificate has been distributed. @@ -322,7 +322,7 @@ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" with_items: "{{ client_users }}" -- include: ../../../common/openshift-master/restart.yml +- include: ../openshift-master/restart.yml - name: Distribute OpenShift CA certificate to nodes hosts: oo_nodes_to_config @@ -371,4 +371,4 @@ state: absent changed_when: false -- include: ../../../common/openshift-node/restart.yml +- include: ../openshift-node/restart.yml diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 74cc1d527..6ed31a644 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -1,28 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: no - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - include: evaluate_groups.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 304559f6e..07db071ce 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -1,4 +1,13 @@ --- +- include: ../../evaluate_groups.yml + vars: + # Do not allow adding hosts during upgrade. + g_new_master_hosts: [] + g_new_node_hosts: [] + openshift_cluster_id: "{{ cluster_id | default('default') }}" + +- include: ../initialize_nodes_to_upgrade.yml + - name: Check for appropriate Docker versions hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config roles: @@ -11,7 +20,7 @@ msg: Cannot upgrade Docker on Atomic operating systems. 
when: openshift.common.is_atomic | bool - - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade_check.yml + - include: upgrade_check.yml when: docker_upgrade is not defined or docker_upgrade | bool @@ -47,7 +56,7 @@ delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade - - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml + - include: upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool - name: Set node schedulability diff --git a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh index 8635eab0d..8635eab0d 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh +++ b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/common/openshift-cluster/upgrades/docker/roles index 6bc1a7aef..6bc1a7aef 120000 --- a/playbooks/byo/openshift-cluster/upgrades/docker/roles +++ b/playbooks/common/openshift-cluster/upgrades/docker/roles diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index bcbc4ee02..cbf6d58b3 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -1,28 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml - - include: ../evaluate_groups.yml vars: # Do not allow adding hosts during upgrade. 
@@ -30,27 +6,7 @@ g_new_node_hosts: [] openshift_cluster_id: "{{ cluster_id | default('default') }}" -- name: Set oo_options - hosts: oo_all_hosts - tasks: - - set_fact: - openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" - when: openshift_docker_additional_registries is not defined - - set_fact: - openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}" - when: openshift_docker_insecure_registries is not defined - - set_fact: - openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" - when: openshift_docker_blocked_registries is not defined - - set_fact: - openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" - when: openshift_docker_options is not defined - - set_fact: - openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" - when: openshift_docker_log_driver is not defined - - set_fact: - openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" - when: openshift_docker_log_options is not defined +- include: ../initialize_oo_option_facts.yml - include: ../initialize_facts.yml @@ -70,8 +26,8 @@ tasks: - name: Check if iptables is running command: systemctl status iptables - ignore_errors: true changed_when: false + failed_when: false register: service_iptables_status - name: Set fact os_firewall_use_firewalld FALSE for iptables diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index c00795a8d..0d7cdb227 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -5,9 +5,10 @@ - name: Upgrade default router and default registry hosts: oo_first_master vars: - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}" - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}" - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | + replace ( '${version}', openshift_image_tag ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | + replace ( '${version}', openshift_image_tag ) }}" pre_tasks: - name: Load lib_openshift modules @@ -21,7 +22,10 @@ selector: 'router' register: all_routers - - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}" + - set_fact: + haproxy_routers: "{{ all_routers.results.results[0]['items'] | + oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | + oo_select_keys_from_list(['metadata']) }}" when: - all_routers.results.returncode == 0 @@ -30,16 +34,15 @@ - all_routers.results.returncode != 0 - name: Update router image to current version + oc_edit: + kind: dc + name: "{{ item['labels']['deploymentconfig'] }}" + namespace: "{{ item['namespace'] }}" + content: + spec.template.spec.containers[0].image: "{{ router_image }}" + with_items: "{{ haproxy_routers }}" when: - all_routers.results.returncode == 0 - command: > - {{ oc_cmd }} patch dc/{{ 
item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p - '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' - --api-version=v1 - with_items: "{{ haproxy_routers }}" - # AUDIT:changed_when_note: `false` not being set here. What we - # need to do is check the current router image version and see if - # this task needs to be ran. - name: Check for default registry oc_obj: @@ -49,15 +52,14 @@ register: _default_registry - name: Update registry image to current version + oc_edit: + kind: dc + name: docker-registry + namespace: default + content: + spec.template.spec.containers[0].image: "{{ registry_image }}" when: - _default_registry.results.results[0] != {} - command: > - {{ oc_cmd }} patch dc/docker-registry -n default -p - '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' - --api-version=v1 - # AUDIT:changed_when_note: `false` not being set here. What we - # need to do is check the current registry image version and see - # if this task needs to be ran. roles: - openshift_manageiq diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml new file mode 100644 index 000000000..75faf5ba8 --- /dev/null +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -0,0 +1,21 @@ +--- +- name: Open firewall ports for GlusterFS + hosts: oo_glusterfs_to_config + vars: + os_firewall_allow: + - service: glusterfs_sshd + port: "2222/tcp" + - service: glusterfs_daemon + port: "24007/tcp" + - service: glusterfs_management + port: "24008/tcp" + - service: glusterfs_bricks + port: "49152-49251/tcp" + roles: + - os_firewall + +- name: Configure GlusterFS + hosts: oo_first_master + roles: + - role: openshift_storage_glusterfs + when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-glusterfs/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index b35368bf1..6fec346c3 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -1,5 +1,5 @@ --- -- include: ../../common/openshift-master/validate_restart.yml +- include: validate_restart.yml - name: Restart masters hosts: oo_masters_to_config @@ -12,8 +12,8 @@ roles: - openshift_facts post_tasks: - - include: ../../common/openshift-master/restart_hosts.yml + - include: restart_hosts.yml when: openshift_rolling_restart_mode | default('services') == 'system' - - include: ../../common/openshift-master/restart_services.yml + - include: restart_services.yml when: openshift_rolling_restart_mode | default('services') == 'services' diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml new file mode 100644 index 000000000..be050c12c --- /dev/null +++ b/playbooks/common/openshift-node/network_manager.yml @@ -0,0 +1,26 @@ +--- +- name: Install and configure NetworkManager + hosts: l_oo_all_hosts + become: yes + tasks: + - name: install NetworkManager + package: + name: 'NetworkManager' + state: present + + - name: configure NetworkManager + lineinfile: + dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" + regexp: '^{{ item }}=' + line: '{{ item }}=yes' + state: present + create: yes + with_items: + - 'USE_PEERDNS' + - 'NM_CONTROLLED' + + - name: enable and start NetworkManager + service: + name: 'NetworkManager' + state: started + enabled: yes diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 20ce47c07..82329eac1 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -340,16 +340,6 @@ resources: port_range_max: 10250 remote_mode: remote_group_id - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress protocol: udp port_range_min: 4789 port_range_max: 4789 diff --git a/requirements.txt b/requirements.txt index 241313b6f..d00de5ed4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,8 @@ -ansible>=2.2 -click -pyOpenSSL +# Versions are pinned to prevent pypi releases arbitrarily breaking +# tests with new APIs/semantics. We want to update versions deliberately. +ansible==2.2.2.0 +click==6.7 +pyOpenSSL==16.2.0 # We need to disable ruamel.yaml for now because of test failures #ruamel.yaml -six +six==1.10.0 diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2 index 66c334ceb..3fb1abf0d 100644 --- a/roles/calico_master/templates/calico-policy-controller.yml.j2 +++ b/roles/calico_master/templates/calico-policy-controller.yml.j2 @@ -74,7 +74,7 @@ spec: serviceAccountName: calico containers: - name: calico-policy-controller - image: quay.io/calico/kube-policy-controller:v0.5.3 + image: quay.io/calico/kube-policy-controller:v0.5.4 env: # The location of the Calico etcd cluster. 
- name: ETCD_ENDPOINTS diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml index 8bd68787a..0114498f8 100644 --- a/roles/cockpit-ui/tasks/main.yml +++ b/roles/cockpit-ui/tasks/main.yml @@ -1,13 +1,16 @@ --- - block: - - name: Create passthrough route for docker-registry + + # When openshift_hosted_manage_registry=true the openshift_hosted + # role will create the appropriate route for the docker-registry. + # When openshift_hosted_manage_registry=false then this code will + # not be run. + - name: fetch the docker-registry route oc_route: kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig" name: docker-registry namespace: default - service_name: docker-registry - state: present - tls_termination: passthrough + state: list register: docker_registry_route - name: Create passthrough route for registry-console @@ -41,7 +44,7 @@ {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %} {% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %} -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}" - -p REGISTRY_HOST="{{ docker_registry_route.results.results[0].spec.host }}" + -p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}" -p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}" --config={{ openshift_hosted_kubeconfig }} -n default diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index 9151dd0bd..1b5598f46 100644 --- a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -62,7 +62,7 @@ ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }} {% endif -%} #[logging] -ETCD_DEBUG="{{ etcd_debug | default(false) | string }}" +ETCD_DEBUG="{{ etcd_debug | default(false) | bool | string }}" {% if etcd_log_package_levels is defined %} ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}" {% endif %} diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 2f6026fbf..4d083c4d5 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -157,13 +157,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -919,11 +919,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -941,7 +945,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for 
key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -958,13 +962,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -984,9 +988,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -1001,10 +1005,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1017,16 +1021,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1131,7 +1135,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod @@ -1504,7 +1508,10 @@ class CAServerCert(OpenShiftCLI): x509output, _ = proc.communicate() if proc.returncode == 0: regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE) - match = regex.search(x509output) # E501 + match = regex.search(x509output.decode()) # E501 + if not match: + return False + for entry in re.split(r", *", match.group(1)): if entry.startswith('DNS') or entry.startswith('IP Address'): cert_names.append(entry.split(':')[1]) @@ -1551,7 +1558,7 @@ class CAServerCert(OpenShiftCLI): api_rval = server_cert.create() if api_rval['returncode'] != 0: - return {'Failed': True, 'msg': api_rval} + return {'failed': True, 'msg': api_rval} return {'changed': True, 'results': api_rval, 'state': state} diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 5f49eef39..48e80a7cd 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -143,13 +143,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -905,11 +905,15 @@ class 
OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -927,7 +931,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -944,13 +948,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -970,9 +974,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -987,10 +991,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1003,16 +1007,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1117,7 +1121,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod @@ -1457,7 +1461,7 @@ class ManageNode(OpenShiftCLI): if selector: _sel = selector - results = self._get('node', rname=_node, selector=_sel) + results = self._get('node', name=_node, selector=_sel) if results['returncode'] != 0: return results diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 7caba04f5..35168d1a3 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -129,13 +129,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: 
disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -891,11 +891,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -913,7 +917,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -930,13 +934,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -956,9 +960,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -973,10 +977,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -989,16 +993,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1103,7 +1107,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index aac3f7166..5f7e4b8fa 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -129,13 +129,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: 
disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -891,11 +891,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -913,7 +917,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -930,13 +934,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -956,9 +960,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -973,10 +977,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -989,16 +993,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1103,7 +1107,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index b0345b026..a6718d921 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -247,13 +247,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: 
disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -1009,11 +1009,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -1031,7 +1035,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -1048,13 +1052,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -1074,9 +1078,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -1091,10 +1095,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1107,16 +1111,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1221,7 +1225,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod @@ -2301,7 +2305,7 @@ class Registry(OpenShiftCLI): rval = 0 for part in self.registry_parts: - result = self._get(part['kind'], rname=part['name']) + result = self._get(part['kind'], name=part['name']) if result['returncode'] == 0 and part['kind'] == 'dc': self.deploymentconfig = DeploymentConfig(result['results'][0]) elif result['returncode'] == 0 and part['kind'] == 'svc': diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 307269da4..0e4b336fb 100644 --- 
a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -272,13 +272,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -1034,11 +1034,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -1056,7 +1060,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -1073,13 +1077,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -1099,9 +1103,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -1116,10 +1120,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1132,16 +1136,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1246,7 +1250,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod @@ -2685,7 +2689,7 @@ class Router(OpenShiftCLI): self.secret = None self.rolebinding = None for part in self.router_parts: - result = self._get(part['kind'], 
rname=part['name']) + result = self._get(part['kind'], name=part['name']) if result['returncode'] == 0 and part['kind'] == 'dc': self.deploymentconfig = DeploymentConfig(result['results'][0]) elif result['returncode'] == 0 and part['kind'] == 'svc': diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index 308a7d806..a34ce351e 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -121,13 +121,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -883,11 +883,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -905,7 +909,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -922,13 +926,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -948,9 +952,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -965,10 +969,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -981,16 +985,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + 
cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1095,7 +1099,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 96345ffe0..69dd23a0e 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -127,13 +127,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -889,11 +889,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -911,7 +915,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -928,13 +932,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -954,9 +958,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -971,10 +975,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -987,16 +991,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + 
cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1101,7 +1105,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod @@ -1524,6 +1528,10 @@ class OCConfigMap(OpenShiftCLI): if state == 'list': return {'changed': False, 'results': api_rval, 'state': state} + if not params['name']: + return {'failed': True, + 'msg': 'Please specify a name when state is absent|present.'} + ######## # Delete ######## diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index 99027c07f..70329ccfe 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -171,13 +171,13 @@ oc_edit: # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -933,11 +933,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -955,7 +959,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -972,13 +976,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -998,9 +1002,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -1015,10 +1019,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1031,16 +1035,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if 
dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1145,7 +1149,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index 34f86a478..bda5eebc5 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -138,13 +138,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -900,11 +900,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -922,7 +926,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -939,13 +943,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -965,9 +969,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -982,10 +986,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -998,16 +1002,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - 
cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1112,7 +1116,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index 00d67108d..462e14868 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -111,13 +111,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -873,11 +873,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -895,7 +899,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -912,13 +916,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -938,9 +942,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -955,10 +959,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -971,16 +975,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + 
cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1085,7 +1089,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index ee918a2d1..8aed060bb 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -130,13 +130,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -892,11 +892,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -914,7 +918,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -931,13 +935,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -957,9 +961,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -974,10 +978,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -990,16 +994,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) 
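
Two mechanical changes repeat through these per-module copies: `%`-interpolation is replaced with `str.format()`, and the vendored `YeditException`/`Yedit`/`Utils` classes gain `# pragma: no cover`, coverage.py's standard exclusion marker, presumably so the shared vendored code stops counting against each module's own coverage. For the simple substitutions touched here the two formatting spellings are equivalent, as this small check (illustrative only) shows:

```python
# Both spellings produce the same flag strings for the substitutions in these
# hunks; the diff simply standardizes on str.format().
selector = 'app=router'
assert '--selector=%s' % selector == '--selector={}'.format(selector)

grace_period = '30'
# int() is still applied before formatting, exactly as in the original code
assert '--grace-period=%s' % int(grace_period) == '--grace-period={}'.format(int(grace_period))
print('formatting unchanged in behavior')
```
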
if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1104,7 +1108,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 62b6049c4..0d18a7afe 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -147,13 +147,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -909,11 +909,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -931,7 +935,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -948,13 +952,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -974,9 +978,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -991,10 +995,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1007,16 +1011,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % 
int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1121,7 +1125,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod @@ -1551,9 +1555,9 @@ class OCLabel(OpenShiftCLI): label_list = [] if self.name: - result = self._get(resource=self.kind, rname=self.name, selector=self.selector) + result = self._get(resource=self.kind, name=self.name, selector=self.selector) - if 'labels' in result['results'][0]['metadata']: + if result['results'][0] and 'labels' in result['results'][0]['metadata']: label_list.append(result['results'][0]['metadata']['labels']) else: label_list.append({}) diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 075c286e0..0b01670c6 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -150,13 +150,13 @@ register: router_output # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -912,11 +912,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -934,7 +938,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -951,13 +955,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -977,9 +981,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -994,10 +998,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) 
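
Beyond the shared refactor, the oc_label.py hunk updates the `_get()` call site to the new `name=` keyword and adds a truthiness check on the first query result before dereferencing its metadata; previously a selector that matched nothing raised a `KeyError` instead of yielding an empty label set. A reduced sketch of the guarded lookup (hypothetical helper name, not from the module):

```python
# Sketch of the oc_label guard above: when the query returns an empty first
# result, the old code raised before ever reaching the 'labels' check.

def extract_labels(result):
    '''collect the labels dict, or an empty dict when nothing matched'''
    label_list = []
    first = result['results'][0]
    if first and 'labels' in first['metadata']:
        label_list.append(first['metadata']['labels'])
    else:
        label_list.append({})
    return label_list

print(extract_labels({'results': [{'metadata': {'labels': {'region': 'infra'}}}]}))  # [{'region': 'infra'}]
print(extract_labels({'results': [{}]}))                                             # [{}] instead of a KeyError
```
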
cmd.extend(['--list-pods', '-o', 'json']) @@ -1010,16 +1014,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1124,7 +1128,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod @@ -1430,7 +1434,7 @@ class OCObject(OpenShiftCLI): def __init__(self, kind, namespace, - rname=None, + name=None, selector=None, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False, @@ -1439,21 +1443,21 @@ class OCObject(OpenShiftCLI): super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose, all_namespaces=all_namespaces) self.kind = kind - self.name = rname + self.name = name self.selector = selector def get(self): '''return a kind by name ''' - results = self._get(self.kind, rname=self.name, selector=self.selector) - if results['returncode'] != 0 and 'stderr' in results and \ - '\"%s\" not found' % self.name in results['stderr']: + results = self._get(self.kind, name=self.name, selector=self.selector) + if (results['returncode'] != 0 and 'stderr' in results and + '\"{}\" not found'.format(self.name) in results['stderr']): results['returncode'] = 0 return results def delete(self): - '''return all pods ''' - return self._delete(self.kind, self.name) + '''delete the object''' + return self._delete(self.kind, name=self.name, selector=self.selector) def create(self, files=None, content=None): ''' @@ -1529,24 +1533,33 @@ class OCObject(OpenShiftCLI): # Get ##### if state == 'list': - return {'changed': False, 'results': api_rval, 'state': 'list'} - - if not params['name']: - return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501 + return {'changed': False, 'results': api_rval, 'state': state} ######## # Delete ######## if state == 'absent': - if not Utils.exists(api_rval['results'], params['name']): - return {'changed': False, 'state': 'absent'} + # if we were passed a name, verify its not in our results + if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']): + return {'changed': False, 'state': state} + + # verify results are empty for the selector + if params['selector'] is not None and len(api_rval['results']) == 0: + return {'changed': False, 'state': state} if check_mode: return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'} api_rval = ocobj.delete() - return {'changed': True, 'results': api_rval, 'state': 'absent'} + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + + return {'changed': True, 'results': api_rval, 'state': state} + + # create/update: Must define a name beyond this point + if not params['name']: + return {'failed': True, 'msg': 'Please specify a name when state is present.'} if state == 'present': ######## @@ -1572,7 +1585,7 @@ class OCObject(OpenShiftCLI): if params['files'] and params['delete_after']: Utils.cleanup(params['files']) - return {'changed': True, 'results': api_rval, 'state': "present"} + return {'changed': True, 'results': api_rval, 'state': state} ######## # Update @@ -1587,7 +1600,7 @@ 
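
The oc_obj.py changes above rework `OCObject`: `rname` is renamed to `name`, `delete()` now forwards the selector, and the `state == 'absent'` branch skips work when a named object is already gone or a selector matched nothing, while a non-zero `oc delete` return code is surfaced as a failure rather than reported as changed. A condensed sketch of that decision flow (assumed helper names; `Utils.exists()` is approximated with a simple name scan, and check_mode handling is omitted):

```python
# Reduced model of the state == 'absent' handling shown in the hunk above.

def absent_result(api_results, name=None, selector=None, delete_returncode=0):
    '''decide what the absent branch would report'''
    # a named object that is not in the current results: nothing to delete
    if name is not None and not any(r.get('metadata', {}).get('name') == name for r in api_results):
        return {'changed': False, 'state': 'absent'}
    # a selector that matched nothing: likewise nothing to delete
    if selector is not None and len(api_results) == 0:
        return {'changed': False, 'state': 'absent'}
    # the delete actually ran; propagate a failure instead of claiming success
    if delete_returncode != 0:
        return {'failed': True, 'msg': 'oc delete failed'}
    return {'changed': True, 'state': 'absent'}

print(absent_result([], name='myroute'))                                    # already absent: nothing to do
print(absent_result([{'metadata': {'name': 'myroute'}}], name='myroute'))   # deletes and reports changed
```
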
class OCObject(OpenShiftCLI): if params['files'] and params['delete_after']: Utils.cleanup(params['files']) - return {'changed': False, 'results': api_rval['results'][0], 'state': "present"} + return {'changed': False, 'results': api_rval['results'][0], 'state': state} if check_mode: return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} @@ -1606,7 +1619,7 @@ class OCObject(OpenShiftCLI): if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} - return {'changed': True, 'results': api_rval, 'state': "present"} + return {'changed': True, 'results': api_rval, 'state': state} # -*- -*- -*- End included fragment: class/oc_obj.py -*- -*- -*- @@ -1634,7 +1647,7 @@ def main(): force=dict(default=False, type='bool'), selector=dict(default=None, type='str'), ), - mutually_exclusive=[["content", "files"]], + mutually_exclusive=[["content", "files"], ["selector", "name"]], supports_check_mode=True, ) diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index d65e1d4c9..9b321b47c 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -82,13 +82,13 @@ oc_objectvalidator: # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -844,11 +844,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -866,7 +870,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -883,13 +887,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -909,9 +913,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -926,10 +930,10 @@ class OpenShiftCLI(object): if node: 
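
With the selector-based delete in place, oc_obj's argument spec (both in the generated module and in src/ansible/oc_obj.py further down) now declares `name` and `selector` as mutually exclusive, and a name is only demanded once `state=present` needs to create or update a specific object. A small sketch of the resulting parameter contract (stand-in validation messages, not the actual AnsibleModule output):

```python
# Sketch of the oc_obj parameter rules after this change: name|selector are
# mutually exclusive, and only the create/update path still requires a name.

def validate_params(state, name=None, selector=None):
    if name is not None and selector is not None:
        return {'failed': True, 'msg': 'parameters are mutually exclusive: name|selector'}
    if state == 'present' and name is None:
        return {'failed': True, 'msg': 'Please specify a name when state is present.'}
    return {'failed': False}

print(validate_params('absent', selector='app=myapp'))  # now allowed: remove everything matching a selector
print(validate_params('present'))                        # still rejected without a name
```
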
cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -942,16 +946,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1056,7 +1060,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 112d9ab5f..34f80ce13 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -139,13 +139,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -901,11 +901,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -923,7 +927,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -940,13 +944,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -966,9 +970,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -983,10 +987,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - 
cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -999,16 +1003,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1113,7 +1117,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index 3fddce055..331f31e41 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -136,13 +136,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -898,11 +898,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -920,7 +924,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -937,13 +941,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -963,9 +967,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -980,10 +984,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + 
cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -996,16 +1000,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1110,7 +1114,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index d63f6e063..3e4601cc3 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -131,13 +131,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -893,11 +893,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -915,7 +919,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -932,13 +936,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -958,9 +962,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -975,10 +979,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if 
pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -991,16 +995,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1105,7 +1109,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index daddec69f..755ab3b02 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -181,13 +181,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -943,11 +943,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -965,7 +969,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -982,13 +986,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -1008,9 +1012,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -1025,10 +1029,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % 
pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1041,16 +1045,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1155,7 +1159,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index 92e9362be..0c83338b0 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -125,13 +125,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -887,11 +887,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -909,7 +913,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -926,13 +930,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -952,9 +956,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -969,10 +973,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + 
cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -985,16 +989,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1099,7 +1103,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 1ffdce4df..26e52a926 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -171,13 +171,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -933,11 +933,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -955,7 +959,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -972,13 +976,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -998,9 +1002,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -1015,10 +1019,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + 
cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1031,16 +1035,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1145,7 +1149,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 77056d5de..440cda1b3 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -177,13 +177,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -939,11 +939,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -961,7 +965,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -978,13 +982,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -1004,9 +1008,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -1021,10 +1025,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + 
cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1037,16 +1041,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1151,7 +1155,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 807bfc992..5eb36ee32 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -123,13 +123,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -885,11 +885,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -907,7 +911,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -924,13 +928,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -950,9 +954,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -967,10 +971,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + 
cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -983,16 +987,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1097,7 +1101,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index c8f4ebef7..1bc788e87 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -123,13 +123,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -885,11 +885,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -907,7 +911,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -924,13 +928,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -950,9 +954,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -967,10 +971,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % 
pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -983,16 +987,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1097,7 +1101,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index aa9f07980..3009e661a 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -183,13 +183,13 @@ ok: [ded-int-aws-master-61034] => { # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -945,11 +945,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -967,7 +971,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -984,13 +988,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -1010,9 +1014,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -1027,10 +1031,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + 
cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1043,16 +1047,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1157,7 +1161,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index eb293322d..88f295a74 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -95,13 +95,13 @@ oc_version: # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -857,11 +857,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -879,7 +883,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -896,13 +900,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -922,9 +926,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -939,10 +943,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + 
cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -955,16 +959,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1069,7 +1073,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 23b292763..5f936fb49 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -160,13 +160,13 @@ EXAMPLES = ''' # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- -class YeditException(Exception): +class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods -class Yedit(object): +class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" @@ -922,11 +922,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -944,7 +948,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -961,13 +965,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -987,9 +991,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -1004,10 +1008,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + 
cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -1020,16 +1024,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') @@ -1134,7 +1138,7 @@ class OpenShiftCLI(object): return rval -class Utils(object): +class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod diff --git a/roles/lib_openshift/src/ansible/oc_obj.py b/roles/lib_openshift/src/ansible/oc_obj.py index 701740e4f..6ab53d044 100644 --- a/roles/lib_openshift/src/ansible/oc_obj.py +++ b/roles/lib_openshift/src/ansible/oc_obj.py @@ -23,7 +23,7 @@ def main(): force=dict(default=False, type='bool'), selector=dict(default=None, type='str'), ), - mutually_exclusive=[["content", "files"]], + mutually_exclusive=[["content", "files"], ["selector", "name"]], supports_check_mode=True, ) diff --git a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py index fa0c4e3af..cf99a6584 100644 --- a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py @@ -77,7 +77,10 @@ class CAServerCert(OpenShiftCLI): x509output, _ = proc.communicate() if proc.returncode == 0: regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE) - match = regex.search(x509output) # E501 + match = regex.search(x509output.decode()) # E501 + if not match: + return False + for entry in re.split(r", *", match.group(1)): if entry.startswith('DNS') or entry.startswith('IP Address'): cert_names.append(entry.split(':')[1]) @@ -124,7 +127,7 @@ class CAServerCert(OpenShiftCLI): api_rval = server_cert.create() if api_rval['returncode'] != 0: - return {'Failed': True, 'msg': api_rval} + return {'failed': True, 'msg': api_rval} return {'changed': True, 'results': api_rval, 'state': state} diff --git a/roles/lib_openshift/src/class/oc_adm_manage_node.py b/roles/lib_openshift/src/class/oc_adm_manage_node.py index c07320477..6d9f24baa 100644 --- a/roles/lib_openshift/src/class/oc_adm_manage_node.py +++ b/roles/lib_openshift/src/class/oc_adm_manage_node.py @@ -44,7 +44,7 @@ class ManageNode(OpenShiftCLI): if selector: _sel = selector - results = self._get('node', rname=_node, selector=_sel) + results = self._get('node', name=_node, selector=_sel) if results['returncode'] != 0: return results diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py index 25519c9c9..720b44cdc 100644 --- a/roles/lib_openshift/src/class/oc_adm_registry.py +++ b/roles/lib_openshift/src/class/oc_adm_registry.py @@ -105,7 +105,7 @@ class Registry(OpenShiftCLI): rval = 0 for part in self.registry_parts: - result = self._get(part['kind'], rname=part['name']) + result = self._get(part['kind'], name=part['name']) if result['returncode'] == 0 and part['kind'] == 'dc': self.deploymentconfig = DeploymentConfig(result['results'][0]) elif result['returncode'] == 0 and part['kind'] == 'svc': diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py index 
356d06fdf..1a0b94b80 100644 --- a/roles/lib_openshift/src/class/oc_adm_router.py +++ b/roles/lib_openshift/src/class/oc_adm_router.py @@ -136,7 +136,7 @@ class Router(OpenShiftCLI): self.secret = None self.rolebinding = None for part in self.router_parts: - result = self._get(part['kind'], rname=part['name']) + result = self._get(part['kind'], name=part['name']) if result['returncode'] == 0 and part['kind'] == 'dc': self.deploymentconfig = DeploymentConfig(result['results'][0]) elif result['returncode'] == 0 and part['kind'] == 'svc': diff --git a/roles/lib_openshift/src/class/oc_configmap.py b/roles/lib_openshift/src/class/oc_configmap.py index 87de3e1df..de77d1102 100644 --- a/roles/lib_openshift/src/class/oc_configmap.py +++ b/roles/lib_openshift/src/class/oc_configmap.py @@ -127,6 +127,10 @@ class OCConfigMap(OpenShiftCLI): if state == 'list': return {'changed': False, 'results': api_rval, 'state': state} + if not params['name']: + return {'failed': True, + 'msg': 'Please specify a name when state is absent|present.'} + ######## # Delete ######## diff --git a/roles/lib_openshift/src/class/oc_label.py b/roles/lib_openshift/src/class/oc_label.py index bd312c170..0a6895177 100644 --- a/roles/lib_openshift/src/class/oc_label.py +++ b/roles/lib_openshift/src/class/oc_label.py @@ -134,9 +134,9 @@ class OCLabel(OpenShiftCLI): label_list = [] if self.name: - result = self._get(resource=self.kind, rname=self.name, selector=self.selector) + result = self._get(resource=self.kind, name=self.name, selector=self.selector) - if 'labels' in result['results'][0]['metadata']: + if result['results'][0] and 'labels' in result['results'][0]['metadata']: label_list.append(result['results'][0]['metadata']['labels']) else: label_list.append({}) diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py index 51d3ce996..667b98eac 100644 --- a/roles/lib_openshift/src/class/oc_obj.py +++ b/roles/lib_openshift/src/class/oc_obj.py @@ -10,7 +10,7 @@ class OCObject(OpenShiftCLI): def __init__(self, kind, namespace, - rname=None, + name=None, selector=None, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False, @@ -19,21 +19,21 @@ class OCObject(OpenShiftCLI): super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose, all_namespaces=all_namespaces) self.kind = kind - self.name = rname + self.name = name self.selector = selector def get(self): '''return a kind by name ''' - results = self._get(self.kind, rname=self.name, selector=self.selector) - if results['returncode'] != 0 and 'stderr' in results and \ - '\"%s\" not found' % self.name in results['stderr']: + results = self._get(self.kind, name=self.name, selector=self.selector) + if (results['returncode'] != 0 and 'stderr' in results and + '\"{}\" not found'.format(self.name) in results['stderr']): results['returncode'] = 0 return results def delete(self): - '''return all pods ''' - return self._delete(self.kind, self.name) + '''delete the object''' + return self._delete(self.kind, name=self.name, selector=self.selector) def create(self, files=None, content=None): ''' @@ -109,24 +109,33 @@ class OCObject(OpenShiftCLI): # Get ##### if state == 'list': - return {'changed': False, 'results': api_rval, 'state': 'list'} - - if not params['name']: - return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501 + return {'changed': False, 'results': api_rval, 'state': state} ######## # Delete ######## if state == 'absent': - if not Utils.exists(api_rval['results'], 
params['name']): - return {'changed': False, 'state': 'absent'} + # if we were passed a name, verify its not in our results + if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']): + return {'changed': False, 'state': state} + + # verify results are empty for the selector + if params['selector'] is not None and len(api_rval['results']) == 0: + return {'changed': False, 'state': state} if check_mode: return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'} api_rval = ocobj.delete() - return {'changed': True, 'results': api_rval, 'state': 'absent'} + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + + return {'changed': True, 'results': api_rval, 'state': state} + + # create/update: Must define a name beyond this point + if not params['name']: + return {'failed': True, 'msg': 'Please specify a name when state is present.'} if state == 'present': ######## @@ -152,7 +161,7 @@ class OCObject(OpenShiftCLI): if params['files'] and params['delete_after']: Utils.cleanup(params['files']) - return {'changed': True, 'results': api_rval, 'state': "present"} + return {'changed': True, 'results': api_rval, 'state': state} ######## # Update @@ -167,7 +176,7 @@ class OCObject(OpenShiftCLI): if params['files'] and params['delete_after']: Utils.cleanup(params['files']) - return {'changed': False, 'results': api_rval['results'][0], 'state': "present"} + return {'changed': False, 'results': api_rval['results'][0], 'state': state} if check_mode: return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} @@ -186,4 +195,4 @@ class OCObject(OpenShiftCLI): if api_rval['returncode'] != 0: return {'failed': True, 'msg': api_rval} - return {'changed': True, 'results': api_rval, 'state': "present"} + return {'changed': True, 'results': api_rval, 'state': state} diff --git a/roles/lib_openshift/src/generate.py b/roles/lib_openshift/src/generate.py index 3f23455b5..2570f51dd 100755 --- a/roles/lib_openshift/src/generate.py +++ b/roles/lib_openshift/src/generate.py @@ -5,12 +5,16 @@ import argparse import os +import re import yaml import six OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__)) OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml') # noqa: E501 LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/') +SKIP_COVERAGE_PATTERN = [re.compile('class Yedit.*$'), + re.compile('class Utils.*$')] +PRAGMA_STRING = ' # pragma: no cover' class GenerateAnsibleException(Exception): @@ -72,6 +76,11 @@ def generate(parts): if idx in [0, 1] and 'flake8: noqa' in line or 'pylint: skip-file' in line: # noqa: E501 continue + for skip in SKIP_COVERAGE_PATTERN: + if re.match(skip, line): + line = line.strip() + line += PRAGMA_STRING + os.linesep + data.write(line) fragment_banner(fpart, "footer", data) diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index 132c586c9..1868b1420 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -95,11 +95,15 @@ class OpenShiftCLI(object): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) - def _delete(self, resource, rname, selector=None): + def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' - cmd = ['delete', resource, rname] - if selector: - cmd.append('--selector=%s' % selector) + cmd = ['delete', resource] + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif 
name is not None: + cmd.append(name) + else: + raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) @@ -117,7 +121,7 @@ class OpenShiftCLI(object): else: cmd.append(template_name) if params: - param_str = ["%s=%s" % (key, value) for key, value in params.items()] + param_str = ["{}={}".format(key, value) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) @@ -134,13 +138,13 @@ class OpenShiftCLI(object): return self.openshift_cmd(['create', '-f', fname]) - def _get(self, resource, rname=None, selector=None): + def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] - if selector: - cmd.append('--selector=%s' % selector) - elif rname: - cmd.append(rname) + if selector is not None: + cmd.append('--selector={}'.format(selector)) + elif name is not None: + cmd.append(name) cmd.extend(['-o', 'json']) @@ -160,9 +164,9 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) - cmd.append('--schedulable=%s' % schedulable) + cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 @@ -177,10 +181,10 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) @@ -193,16 +197,16 @@ class OpenShiftCLI(object): if node: cmd.extend(node) else: - cmd.append('--selector=%s' % selector) + cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: - cmd.append('--pod-selector=%s' % pod_selector) + cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: - cmd.append('--grace-period=%s' % int(grace_period)) + cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') diff --git a/roles/lib_openshift/src/test/integration/oc_obj.yml b/roles/lib_openshift/src/test/integration/oc_obj.yml new file mode 100755 index 000000000..c22a2f6a9 --- /dev/null +++ b/roles/lib_openshift/src/test/integration/oc_obj.yml @@ -0,0 +1,207 @@ +#!/usr/bin/ansible-playbook --module-path=../../../library/ +# ./oc_obj.yml -e "cli_master_test=$OPENSHIFT_MASTER +--- +- hosts: "{{ cli_master_test }}" + gather_facts: no + user: root + tasks: + - name: create test project + oc_project: + name: test + description: all things test + node_selector: "" + + # Create Check # + - name: create a dc + oc_obj: + state: present + name: mysql + namespace: test + kind: dc + content: + path: /tmp/dcout + data: + apiVersion: v1 + kind: DeploymentConfig + metadata: + labels: + name: mysql + name: mysql + spec: + replicas: 1 + selector: {} + strategy: + resources: {} + type: Recreate + template: + metadata: + labels: + name: mysql + spec: + containers: + - env: + - name: MYSQL_USER + value: mysql + - name: MYSQL_PASSWORD + value: mysql + - name: MYSQL_DATABASE + value: mysql + - name: MYSQL_ROOT_PASSWORD + value: mysql + image: openshift/mysql-55-centos7:latest + imagePullPolicy: Always + name: mysql + ports: + - containerPort: 3306 + name: tcp-3306 + protocol: TCP + resources: {} + securityContext: + capabilities: {} + privileged: false + terminationMessagePath: /dev/termination-log + dnsPolicy: ClusterFirst + 
restartPolicy: Always + securityContext: {} + terminationGracePeriodSeconds: 31 + triggers: + - type: ConfigChange + - imageChangeParams: + automatic: true + containerNames: + - mysql + from: + kind: ImageStreamTag + name: mysql:latest + type: ImageChange + + - name: fetch created dc + oc_obj: + name: mysql + kind: dc + state: list + namespace: test + register: dcout + + - debug: var=dcout + + - assert: + that: + - dcout.results.returncode == 0 + - dcout.results.results[0].metadata.name == 'mysql' + # End Create Check # + + + # Delete Check # + - name: delete created dc + oc_obj: + name: mysql + kind: dc + state: absent + namespace: test + register: dcout + + - name: fetch delete dc + oc_obj: + name: mysql + kind: dc + state: list + namespace: test + register: dcout + + - debug: var=dcout + + - assert: + that: + - dcout.results.returncode == 0 + - "'\"mysql\" not found' in dcout.results.stderr" + # End Delete Check # + + # Delete selector Check # + - name: create a dc + oc_obj: + state: present + name: mysql + namespace: test + kind: dc + content: + path: /tmp/dcout + data: + apiVersion: v1 + kind: DeploymentConfig + metadata: + labels: + name: mysql + name: mysql + spec: + replicas: 1 + selector: {} + strategy: + resources: {} + type: Recreate + template: + metadata: + labels: + name: mysql + spec: + containers: + - env: + - name: MYSQL_USER + value: mysql + - name: MYSQL_PASSWORD + value: mysql + - name: MYSQL_DATABASE + value: mysql + - name: MYSQL_ROOT_PASSWORD + value: mysql + image: openshift/mysql-55-centos7:latest + imagePullPolicy: Always + name: mysql + ports: + - containerPort: 3306 + name: tcp-3306 + protocol: TCP + resources: {} + securityContext: + capabilities: {} + privileged: false + terminationMessagePath: /dev/termination-log + dnsPolicy: ClusterFirst + restartPolicy: Always + securityContext: {} + terminationGracePeriodSeconds: 31 + triggers: + - type: ConfigChange + - imageChangeParams: + automatic: true + containerNames: + - mysql + from: + kind: ImageStreamTag + name: mysql:latest + type: ImageChange + + - name: delete using selector + oc_obj: + namespace: test + selector: name=mysql + kind: dc + state: absent + register: dcout + + - debug: var=dcout + + - name: get the dc + oc_obj: + namespace: test + selector: name=mysql + kind: dc + state: list + register: dcout + + - debug: var=dcout + + - assert: + that: + - dcout.results.returncode == 0 + - dcout.results.results[0]["items"]|length == 0 diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md index df43c3770..107e27f89 100644 --- a/roles/openshift_certificate_expiry/README.md +++ b/roles/openshift_certificate_expiry/README.md @@ -19,7 +19,6 @@ to be used with an inventory that is representative of the cluster. For best results run `ansible-playbook` with the `-v` option. - # Role Variables Core variables in this role: @@ -51,8 +50,8 @@ How to use the Certificate Expiration Checking Role. Run one of the example playbooks using an inventory file representative of your existing cluster. Some example playbooks are -included in this role, or you can read on below after this example to -craft you own. +included in this role, or you can [read on below for more examples](#more-example-playbooks) +to help you craft you own. 
``` $ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml @@ -69,11 +68,47 @@ Using the `easy-mode.yaml` playbook will produce: > `/usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml` > instead +## Run from a container + +The example playbooks that use this role are packaged in the +[container image for openshift-ansible](../../README_CONTAINER_IMAGE.md), so you +can run any of them by setting the `PLAYBOOK_FILE` environment variable when +running an openshift-ansible container. + +There are several [examples](../../examples/README.md) in the `examples` directory that run certificate check playbooks from a container running on OpenShift. + ## More Example Playbooks > **Note:** These Playbooks are available to run directly out of the > [/playbooks/certificate_expiry/](../../playbooks/certificate_expiry/) directory. +### Default behavior + +This playbook just invokes the certificate expiration check role with default options: + + +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + roles: + - role: openshift_certificate_expiry +``` + +**From git:** +``` +$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml +``` +**From openshift-ansible-playbooks rpm:** +``` +$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml +``` + +> [View This Playbook](../../playbooks/certificate_expiry/default.yaml) + +### Easy mode This example playbook is great if you're just wanting to **try the role out**. This playbook enables HTML and JSON reports. All @@ -104,35 +139,70 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce > [View This Playbook](../../playbooks/certificate_expiry/easy-mode.yaml) -*** +### Easy mode and upload reports to masters + +This example builds on top of [easy-mode.yaml](#easy-mode) and additionally +uploads a copy of the generated reports to the masters, with a timestamp in the +file names. + +This is specially useful when the playbook runs from within a container, because +the reports are generated inside the container and we need a way to access them. +Uploading a copy of the reports to the masters is one way to make it easy to +access them. Alternatively you can use the +[role variables](#role-variables) that control the path of the generated reports +to point to a container volume (see the [playbook with custom paths](#generate-html-and-json-reports-in-a-custom-path) for an example). -Default behavior: +With the container use case in mind, this playbook allows control over some +options via environment variables: + + - `CERT_EXPIRY_WARN_DAYS`: sets `openshift_certificate_expiry_warning_days`, overriding the role's default. + - `COPY_TO_PATH`: path in the masters where generated reports are uploaded. 
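+Since `lookup('env', ...)` reads the environment of the machine where Ansible
+runs, the same variables can also be exported when invoking the playbook
+directly with `ansible-playbook`. A minimal sketch (the values shown are
+placeholders):
+
+```
+$ CERT_EXPIRY_WARN_DAYS=30 \
+  COPY_TO_PATH=/etc/origin/certificate_expiration_report \
+  ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode-upload.yaml
+```
+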
```yaml --- -- name: Check cert expirys +- name: Generate certificate expiration reports hosts: nodes:masters:etcd - become: yes gather_facts: no + vars: + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_show_all: yes + openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}" roles: - role: openshift_certificate_expiry + +- name: Upload reports to master + hosts: masters + gather_facts: no + vars: + destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + tasks: + - name: Create directory in masters + file: + path: "{{ destination_path }}" + state: directory + - name: Copy the reports to the masters + copy: + dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}" + src: "/tmp/{{ item }}" + with_items: + - "cert-expiry-report.html" + - "cert-expiry-report.json" ``` **From git:** ``` -$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml +$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode-upload.yaml ``` **From openshift-ansible-playbooks rpm:** ``` -$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml +$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode-upload.yaml ``` -> [View This Playbook](../../playbooks/certificate_expiry/default.yaml) +> [View This Playbook](../../playbooks/certificate_expiry/easy-mode-upload.yaml) -*** - - -Generate HTML and JSON artifacts in their default paths: +### Generate HTML and JSON artifacts in their default paths ```yaml --- @@ -158,7 +228,38 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce > [View This Playbook](../../playbooks/certificate_expiry/html_and_json_default_paths.yaml) -*** +### Generate HTML and JSON reports in a custom path + +This example customizes the report generation path to point to a specific path (`/var/lib/certcheck`) and uses a date timestamp for the generated files. This allows you to reuse a certain location to keep multiple copies of the reports. 
+ +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_save_json_results: yes + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html" + openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json" + roles: + - role: openshift_certificate_expiry +``` + +**From git:** +``` +$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/html_and_json_timestamp.yaml +``` +**From openshift-ansible-playbooks rpm:** +``` +$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/html_and_json_timestamp.yaml +``` + +> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_timestamp.yaml) + +### Long warning window Change the expiration warning window to 1500 days (good for testing the module out): @@ -186,7 +287,7 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce > [View This Playbook](../../playbooks/certificate_expiry/longer_warning_period.yaml) -*** +### Long warning window and JSON report Change the expiration warning window to 1500 days (good for testing the module out) and save the results as a JSON file: diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml index 325d2a4e8..97044fff6 100644 --- a/roles/openshift_excluder/tasks/disable.yml +++ b/roles/openshift_excluder/tasks/disable.yml @@ -4,6 +4,14 @@ # - docker_excluder_package_state - include: init.yml +# unexclude the current openshift/origin-excluder if it is installed so it can be updated +- include: unexclude.yml + vars: + unexclude_docker_excluder: false + unexclude_openshift_excluder: "{{ openshift_excluder_on | bool }}" + when: + - not openshift.common.is_atomic | bool + # Install any excluder that is enabled - include: install.yml vars: diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index e1f4c4e6d..adeb85c3f 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -936,7 +936,9 @@ def set_version_facts_if_unset(facts): facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5 facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6 - if version_gte_3_5_or_1_5: + if version_gte_3_6_or_1_6: + examples_content_version = 'v1.6' + elif version_gte_3_5_or_1_5: examples_content_version = 'v1.5' elif version_gte_3_4_or_1_4: examples_content_version = 'v1.4' @@ -2153,6 +2155,10 @@ class OpenShiftFacts(object): nfs=dict( directory='/exports', options='*(rw,root_squash)'), + glusterfs=dict( + endpoints='glusterfs-registry-endpoints', + path='glusterfs-registry-volume', + readOnly=False), host=None, access=dict( modes=['ReadWriteMany'] diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py index cf0fe19f1..03c40b78b 100644 --- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py +++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py @@ -4,6 +4,7 @@ Ansible action plugin to execute health checks in OpenShift clusters. 
# pylint: disable=wrong-import-position,missing-docstring,invalid-name import sys import os +from collections import defaultdict try: from __main__ import display @@ -41,20 +42,11 @@ class ActionModule(ActionBase): return result args = self._task.args - requested_checks = resolve_checks(args.get("checks", []), known_checks.values()) - - unknown_checks = requested_checks - set(known_checks) - if unknown_checks: - result["failed"] = True - result["msg"] = ( - "One or more checks are unknown: {}. " - "Make sure there is no typo in the playbook and no files are missing." - ).format(", ".join(unknown_checks)) - return result + resolved_checks = resolve_checks(args.get("checks", []), known_checks.values()) result["checks"] = check_results = {} - for check_name in requested_checks & set(known_checks): + for check_name in resolved_checks: display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"])) check = known_checks[check_name] @@ -81,10 +73,7 @@ class ActionModule(ActionBase): load_checks() known_checks = {} - - known_check_classes = set(cls for cls in OpenShiftCheck.subclasses()) - - for cls in known_check_classes: + for cls in OpenShiftCheck.subclasses(): check_name = cls.name if check_name in known_checks: other_cls = known_checks[check_name].__class__ @@ -94,26 +83,45 @@ class ActionModule(ActionBase): cls.__module__, cls.__name__, other_cls.__module__, other_cls.__name__)) known_checks[check_name] = cls(execute_module=self._execute_module) - return known_checks def resolve_checks(names, all_checks): """Returns a set of resolved check names. - Resolving a check name involves expanding tag references (e.g., '@tag') with - all the checks that contain the given tag. + Resolving a check name expands tag references (e.g., "@tag") to all the + checks that contain the given tag. OpenShiftCheckException is raised if + names contains an unknown check or tag name. names should be a sequence of strings. all_checks should be a sequence of check classes/instances. 
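+    Illustrative example (the check name is hypothetical, the 'preflight'
+    tag is real): resolve_checks(['some_check', '@preflight'], all_checks)
+    returns {'some_check'} together with the name of every check tagged
+    'preflight', and raises OpenShiftCheckException if 'some_check' or the
+    tag is unknown to all_checks.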
""" - resolved = set() - for name in names: - if name.startswith("@"): - for check in all_checks: - if name[1:] in check.tags: - resolved.add(check.name) - else: - resolved.add(name) + known_check_names = set(check.name for check in all_checks) + known_tag_names = set(name for check in all_checks for name in check.tags) + + check_names = set(name for name in names if not name.startswith('@')) + tag_names = set(name[1:] for name in names if name.startswith('@')) + + unknown_check_names = check_names - known_check_names + unknown_tag_names = tag_names - known_tag_names + + if unknown_check_names or unknown_tag_names: + msg = [] + if unknown_check_names: + msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names)))) + if unknown_tag_names: + msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names)))) + msg.append('Make sure there is no typo in the playbook and no files are missing.') + raise OpenShiftCheckException('\n'.join(msg)) + + tag_to_checks = defaultdict(set) + for check in all_checks: + for tag in check.tags: + tag_to_checks[tag].add(check.name) + + resolved = check_names.copy() + for tag in tag_names: + resolved.update(tag_to_checks[tag]) + return resolved diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py index 191a4b107..a46589443 100755 --- a/roles/openshift_health_checker/library/aos_version.py +++ b/roles/openshift_health_checker/library/aos_version.py @@ -1,91 +1,199 @@ #!/usr/bin/python # vim: expandtab:tabstop=4:shiftwidth=4 ''' -Ansible module for determining if multiple versions of an OpenShift package are -available, and if the version requested is available down to the given -precision. - -Multiple versions available suggest that multiple repos are enabled for the -different versions, which may cause installation problems. +Ansible module for yum-based systems determining if multiple releases +of an OpenShift package are available, and if the release requested +(if any) is available down to the given precision. + +For Enterprise, multiple releases available suggest that multiple repos +are enabled for the different releases, which may cause installation +problems. With Origin, however, this is a normal state of affairs as +all the releases are provided in a single repo with the expectation that +only the latest can be installed. + +Code in the openshift_version role contains a lot of logic to pin down +the exact package and image version to use and so does some validation +of release availability already. Without duplicating all that, we would +like the user to have a helpful error message if we detect things will +not work out right. Note that if openshift_release is not specified in +the inventory, the version comparison checks just pass. 
+ +TODO: fail gracefully on non-yum systems (dnf in Fedora) ''' -import yum # pylint: disable=import-error - from ansible.module_utils.basic import AnsibleModule +IMPORT_EXCEPTION = None +try: + import yum # pylint: disable=import-error +except ImportError as err: + IMPORT_EXCEPTION = err # in tox test env, yum import fails + -def main(): # pylint: disable=missing-docstring,too-many-branches +class AosVersionException(Exception): + '''Base exception class for package version problems''' + def __init__(self, message, problem_pkgs=None): + Exception.__init__(self, message) + self.problem_pkgs = problem_pkgs + + +def main(): + '''Entrypoint for this Ansible module''' module = AnsibleModule( argument_spec=dict( - prefix=dict(required=True), # atomic-openshift, origin, ... - version=dict(required=True), + requested_openshift_release=dict(type="str", default=''), + openshift_deployment_type=dict(required=True), + rpm_prefix=dict(required=True), # atomic-openshift, origin, ...? ), supports_check_mode=True ) - def bail(error): # pylint: disable=missing-docstring - module.fail_json(msg=error) - - rpm_prefix = module.params['prefix'] + if IMPORT_EXCEPTION: + module.fail_json(msg="aos_version module could not import yum: %s" % IMPORT_EXCEPTION) + # determine the packages we will look for + rpm_prefix = module.params['rpm_prefix'] if not rpm_prefix: - bail("prefix must not be empty") - - yb = yum.YumBase() # pylint: disable=invalid-name - yb.conf.disable_excludes = ["all"] # assume the openshift excluder will be managed, ignore current state - - # search for package versions available for aos pkgs - expected_pkgs = [ + module.fail_json(msg="rpm_prefix must not be empty") + expected_pkgs = set([ rpm_prefix, rpm_prefix + '-master', rpm_prefix + '-node', - ] + ]) + + # determine what level of precision the user specified for the openshift version. + # should look like a version string with possibly many segments e.g. "3.4.1": + requested_openshift_release = module.params['requested_openshift_release'] + + # get the list of packages available and complain if anything is wrong + try: + pkgs = _retrieve_available_packages(expected_pkgs) + if requested_openshift_release: + _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release) + _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release) + if module.params['openshift_deployment_type'] in ['openshift-enterprise']: + _check_multi_minor_release(pkgs, expected_pkgs) + except AosVersionException as excinfo: + module.fail_json(msg=str(excinfo)) + module.exit_json(changed=False) + + +def _retrieve_available_packages(expected_pkgs): + # search for package versions available for openshift pkgs + yb = yum.YumBase() # pylint: disable=invalid-name + + # The openshift excluder prevents unintended updates to openshift + # packages by setting yum excludes on those packages. See: + # https://wiki.centos.org/SpecialInterestGroup/PaaS/OpenShift-Origin-Control-Updates + # Excludes are then disabled during an install or upgrade, but + # this check will most likely be running outside either. When we + # attempt to determine what packages are available via yum they may + # be excluded. So, for our purposes here, disable excludes to see + # what will really be available during an install or upgrade. 
+ yb.conf.disable_excludes = ['all'] + try: pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs) - except yum.Errors.PackageSackError as e: # pylint: disable=invalid-name + except yum.Errors.PackageSackError as excinfo: # you only hit this if *none* of the packages are available - bail('Unable to find any OpenShift packages.\nCheck your subscription and repo settings.\n%s' % e) + raise AosVersionException('\n'.join([ + 'Unable to find any OpenShift packages.', + 'Check your subscription and repo settings.', + str(excinfo), + ])) + return pkgs - # determine what level of precision we're expecting for the version - expected_version = module.params['version'] - if expected_version.startswith('v'): # v3.3 => 3.3 - expected_version = expected_version[1:] - num_dots = expected_version.count('.') - pkgs_by_name_version = {} +class PreciseVersionNotFound(AosVersionException): + '''Exception for reporting packages not available at given release''' + def __init__(self, requested_release, not_found): + msg = ['Not all of the required packages are available at requested version %s:' % requested_release] + msg += [' ' + name for name in not_found] + msg += ['Please check your subscriptions and enabled repositories.'] + AosVersionException.__init__(self, '\n'.join(msg), not_found) + + +def _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release): + # see if any packages couldn't be found at requested release version + # we would like to verify that the latest available pkgs have however specific a version is given. + # so e.g. if there is a package version 3.4.1.5 the check passes; if only 3.4.0, it fails. + pkgs_precise_version_found = {} for pkg in pkgs: - # get expected version precision - match_version = '.'.join(pkg.version.split('.')[:num_dots + 1]) - if match_version == expected_version: + if pkg.name not in expected_pkgs: + continue + # does the version match, to the precision requested? + # and, is it strictly greater, at the precision requested? 
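+        # e.g. a requested release of '3.4' contains one dot, so only the
+        # first two segments of pkg.version are compared ('3.4.1.5' -> '3.4')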
+ match_version = '.'.join(pkg.version.split('.')[:requested_openshift_release.count('.') + 1]) + if match_version == requested_openshift_release: pkgs_precise_version_found[pkg.name] = True - # get x.y version precision - minor_version = '.'.join(pkg.version.split('.')[:2]) - if pkg.name not in pkgs_by_name_version: - pkgs_by_name_version[pkg.name] = {} - pkgs_by_name_version[pkg.name][minor_version] = True - # see if any packages couldn't be found at requested version - # see if any packages are available in more than one minor version not_found = [] - multi_found = [] for name in expected_pkgs: if name not in pkgs_precise_version_found: not_found.append(name) + + if not_found: + raise PreciseVersionNotFound(requested_openshift_release, not_found) + + +class FoundHigherVersion(AosVersionException): + '''Exception for reporting that a higher version than requested is available''' + def __init__(self, requested_release, higher_found): + msg = ['Some required package(s) are available at a version', + 'that is higher than requested %s:' % requested_release] + msg += [' ' + name for name in higher_found] + msg += ['This will prevent installing the version you requested.'] + msg += ['Please check your enabled repositories or adjust openshift_release.'] + AosVersionException.__init__(self, '\n'.join(msg), higher_found) + + +def _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release): + req_release_arr = [int(segment) for segment in requested_openshift_release.split(".")] + # see if any packages are available in a version higher than requested + higher_version_for_pkg = {} + for pkg in pkgs: + if pkg.name not in expected_pkgs: + continue + version = [int(segment) for segment in pkg.version.split(".")] + too_high = version[:len(req_release_arr)] > req_release_arr + higher_than_seen = version > higher_version_for_pkg.get(pkg.name, []) + if too_high and higher_than_seen: + higher_version_for_pkg[pkg.name] = version + + if higher_version_for_pkg: + higher_found = [] + for name, version in higher_version_for_pkg.items(): + higher_found.append(name + '-' + '.'.join(str(segment) for segment in version)) + raise FoundHigherVersion(requested_openshift_release, higher_found) + + +class FoundMultiRelease(AosVersionException): + '''Exception for reporting multiple minor releases found for same package''' + def __init__(self, multi_found): + msg = ['Multiple minor versions of these packages are available'] + msg += [' ' + name for name in multi_found] + msg += ["There should only be one OpenShift release repository enabled at a time."] + AosVersionException.__init__(self, '\n'.join(msg), multi_found) + + +def _check_multi_minor_release(pkgs, expected_pkgs): + # see if any packages are available in more than one minor version + pkgs_by_name_version = {} + for pkg in pkgs: + # keep track of x.y (minor release) versions seen + minor_release = '.'.join(pkg.version.split('.')[:2]) + if pkg.name not in pkgs_by_name_version: + pkgs_by_name_version[pkg.name] = {} + pkgs_by_name_version[pkg.name][minor_release] = True + + multi_found = [] + for name in expected_pkgs: if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1: multi_found.append(name) - if not_found: - msg = 'Not all of the required packages are available at requested version %s:\n' % expected_version - for name in not_found: - msg += ' %s\n' % name - bail(msg + 'Please check your subscriptions and enabled repositories.') - if multi_found: - msg = 'Multiple minor versions of these packages are available\n' - for name 
in multi_found: - msg += ' %s\n' % name - bail(msg + "There should only be one OpenShift version's repository enabled at a time.") - module.exit_json(changed=False) + if multi_found: + raise FoundMultiRelease(multi_found) if __name__ == '__main__': diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml index 0bbeadd34..cd9b55902 100644 --- a/roles/openshift_health_checker/meta/main.yml +++ b/roles/openshift_health_checker/meta/main.yml @@ -1,3 +1,4 @@ --- dependencies: - role: openshift_facts + - role: openshift_repos diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py index 9891972a6..a7eb720fd 100644 --- a/roles/openshift_health_checker/openshift_checks/package_availability.py +++ b/roles/openshift_health_checker/openshift_checks/package_availability.py @@ -9,6 +9,10 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck): name = "package_availability" tags = ["preflight"] + @classmethod + def is_active(cls, task_vars): + return super(PackageAvailability, cls).is_active(task_vars) and task_vars["ansible_pkg_mgr"] == "yum" + def run(self, tmp, task_vars): rpm_prefix = get_var(task_vars, "openshift", "common", "service_type") group_names = get_var(task_vars, "group_names", default=[]) diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index 42193a1c6..682f6bd40 100644 --- a/roles/openshift_health_checker/openshift_checks/package_version.py +++ b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -9,12 +9,17 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): name = "package_version" tags = ["preflight"] - def run(self, tmp, task_vars): - rpm_prefix = get_var(task_vars, "openshift", "common", "service_type") - openshift_release = get_var(task_vars, "openshift_release") + @classmethod + def is_active(cls, task_vars): + """Skip hosts that do not have package requirements.""" + group_names = get_var(task_vars, "group_names", default=[]) + master_or_node = 'masters' in group_names or 'nodes' in group_names + return super(PackageVersion, cls).is_active(task_vars) and master_or_node + def run(self, tmp, task_vars): args = { - "prefix": rpm_prefix, - "version": openshift_release, + "requested_openshift_release": get_var(task_vars, "openshift_release", default=''), + "openshift_deployment_type": get_var(task_vars, "openshift_deployment_type"), + "rpm_prefix": get_var(task_vars, "openshift", "common", "service_type"), } return self.execute_module("aos_version", args, tmp, task_vars) diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py new file mode 100644 index 000000000..a877246f4 --- /dev/null +++ b/roles/openshift_health_checker/test/action_plugin_test.py @@ -0,0 +1,227 @@ +import pytest + +from openshift_health_check import ActionModule, resolve_checks +from openshift_checks import OpenShiftCheckException + + +def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None): + """Returns a new class that is compatible with OpenShiftCheck for testing.""" + + _name, _tags = name, tags + + class FakeCheck(object): + name = _name + tags = _tags or [] + + def __init__(self, execute_module=None): + pass + + @classmethod + def is_active(cls, task_vars): + return is_active + + def run(self, tmp, 
task_vars): + if run_exception is not None: + raise run_exception + return run_return + + return FakeCheck + + +# Fixtures + + +@pytest.fixture +def plugin(): + task = FakeTask('openshift_health_check', {'checks': ['fake_check']}) + plugin = ActionModule(task, None, None, None, None, None) + return plugin + + +class FakeTask(object): + def __init__(self, action, args): + self.action = action + self.args = args + self.async = 0 + + +@pytest.fixture +def task_vars(): + return dict(openshift=dict(), ansible_host='unit-test-host') + + +# Assertion helpers + + +def failed(result, msg_has=None): + if msg_has is not None: + assert 'msg' in result + for term in msg_has: + assert term in result['msg'] + return result.get('failed', False) + + +def changed(result): + return result.get('changed', False) + + +def skipped(result): + return result.get('skipped', False) + + +# Tests + + +@pytest.mark.parametrize('task_vars', [ + None, + {}, +]) +def test_action_plugin_missing_openshift_facts(plugin, task_vars): + result = plugin.run(tmp=None, task_vars=task_vars) + + assert failed(result, msg_has=['openshift_facts']) + + +def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars, monkeypatch): + FakeCheck1 = fake_check('duplicate_name') + FakeCheck2 = fake_check('duplicate_name') + checks = [FakeCheck1, FakeCheck2] + monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks)) + + result = plugin.run(tmp=None, task_vars=task_vars) + + assert failed(result, msg_has=['unique', 'duplicate_name', 'FakeCheck']) + + +def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch): + checks = [fake_check(is_active=False)] + monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks)) + + result = plugin.run(tmp=None, task_vars=task_vars) + + assert result['checks']['fake_check'] == {'skipped': True} + assert not failed(result) + assert not changed(result) + assert not skipped(result) + + +def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch): + check_return_value = {'ok': 'test'} + check_class = fake_check(run_return=check_return_value) + monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()}) + monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check']) + + result = plugin.run(tmp=None, task_vars=task_vars) + + assert result['checks']['fake_check'] == check_return_value + assert not failed(result) + assert not changed(result) + assert not skipped(result) + + +def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch): + check_return_value = {'ok': 'test', 'changed': True} + check_class = fake_check(run_return=check_return_value) + monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()}) + monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check']) + + result = plugin.run(tmp=None, task_vars=task_vars) + + assert result['checks']['fake_check'] == check_return_value + assert not failed(result) + assert changed(result) + assert not skipped(result) + + +def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch): + check_return_value = {'failed': True} + check_class = fake_check(run_return=check_return_value) + monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()}) + monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check']) + + result = plugin.run(tmp=None, task_vars=task_vars) + + 
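+    # a check that reports failed=True must mark the whole task result as
+    # failed, while leaving changed/skipped unset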
assert result['checks']['fake_check'] == check_return_value + assert failed(result, msg_has=['failed']) + assert not changed(result) + assert not skipped(result) + + +def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch): + exception_msg = 'fake check has an exception' + run_exception = OpenShiftCheckException(exception_msg) + check_class = fake_check(run_exception=run_exception) + monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()}) + monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check']) + + result = plugin.run(tmp=None, task_vars=task_vars) + + assert failed(result['checks']['fake_check'], msg_has=exception_msg) + assert failed(result, msg_has=['failed']) + assert not changed(result) + assert not skipped(result) + + +@pytest.mark.parametrize('names,all_checks,expected', [ + ([], [], set()), + ( + ['a', 'b'], + [ + fake_check('a'), + fake_check('b'), + ], + set(['a', 'b']), + ), + ( + ['a', 'b', '@group'], + [ + fake_check('from_group_1', ['group', 'another_group']), + fake_check('not_in_group', ['another_group']), + fake_check('from_group_2', ['preflight', 'group']), + fake_check('a'), + fake_check('b'), + ], + set(['a', 'b', 'from_group_1', 'from_group_2']), + ), +]) +def test_resolve_checks_ok(names, all_checks, expected): + assert resolve_checks(names, all_checks) == expected + + +@pytest.mark.parametrize('names,all_checks,words_in_exception,words_not_in_exception', [ + ( + ['testA', 'testB'], + [], + ['check', 'name', 'testA', 'testB'], + ['tag', 'group', '@'], + ), + ( + ['@group'], + [], + ['tag', 'name', 'group'], + ['check', '@'], + ), + ( + ['testA', 'testB', '@group'], + [], + ['check', 'name', 'testA', 'testB', 'tag', 'group'], + ['@'], + ), + ( + ['testA', 'testB', '@group'], + [ + fake_check('from_group_1', ['group', 'another_group']), + fake_check('not_in_group', ['another_group']), + fake_check('from_group_2', ['preflight', 'group']), + ], + ['check', 'name', 'testA', 'testB'], + ['tag', 'group', '@'], + ), +]) +def test_resolve_checks_failure(names, all_checks, words_in_exception, words_not_in_exception): + with pytest.raises(Exception) as excinfo: + resolve_checks(names, all_checks) + for word in words_in_exception: + assert word in str(excinfo.value) + for word in words_not_in_exception: + assert word not in str(excinfo.value) diff --git a/roles/openshift_health_checker/test/aos_version_test.py b/roles/openshift_health_checker/test/aos_version_test.py new file mode 100644 index 000000000..39c86067a --- /dev/null +++ b/roles/openshift_health_checker/test/aos_version_test.py @@ -0,0 +1,120 @@ +import pytest +import aos_version + +from collections import namedtuple +Package = namedtuple('Package', ['name', 'version']) + +expected_pkgs = set(['spam', 'eggs']) + + +@pytest.mark.parametrize('pkgs, requested_release, expect_not_found', [ + ( + [], + '3.2.1', + expected_pkgs, # none found + ), + ( + [Package('spam', '3.2.1')], + '3.2', + ['eggs'], # completely missing + ), + ( + [Package('spam', '3.2.1'), Package('eggs', '3.3.2')], + '3.2', + ['eggs'], # not the right version + ), + ( + [Package('spam', '3.2.1'), Package('eggs', '3.2.1')], + '3.2', + [], # all found + ), + ( + [Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')], + '3.2.1', + [], # found with more specific version + ), + ( + [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')], + '3.2.1', + ['spam'], # eggs found with multiple versions + ), +]) +def test_check_pkgs_for_precise_version(pkgs, requested_release, 
expect_not_found): + if expect_not_found: + with pytest.raises(aos_version.PreciseVersionNotFound) as e: + aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release) + assert set(expect_not_found) == set(e.value.problem_pkgs) + else: + aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release) + + +@pytest.mark.parametrize('pkgs, requested_release, expect_higher', [ + ( + [], + '3.2.1', + [], + ), + ( + [Package('spam', '3.2.1')], + '3.2', + [], # more precise but not strictly higher + ), + ( + [Package('spam', '3.3')], + '3.2.1', + ['spam-3.3'], # lower precision, but higher + ), + ( + [Package('spam', '3.2.1'), Package('eggs', '3.3.2')], + '3.2', + ['eggs-3.3.2'], # one too high + ), + ( + [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')], + '3.2.1', + ['eggs-3.4'], # multiple versions, one is higher + ), + ( + [Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')], + '3.2.1', + ['eggs-3.4'], # multiple versions, two are higher + ), +]) +def test_check_pkgs_for_greater_version(pkgs, requested_release, expect_higher): + if expect_higher: + with pytest.raises(aos_version.FoundHigherVersion) as e: + aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release) + assert set(expect_higher) == set(e.value.problem_pkgs) + else: + aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release) + + +@pytest.mark.parametrize('pkgs, expect_to_flag_pkgs', [ + ( + [], + [], + ), + ( + [Package('spam', '3.2.1')], + [], + ), + ( + [Package('spam', '3.2.1'), Package('eggs', '3.2.2')], + [], + ), + ( + [Package('spam', '3.2.1'), Package('spam', '3.3.2')], + ['spam'], + ), + ( + [Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')], + ['eggs'], + ), +]) +def test_check_pkgs_for_multi_release(pkgs, expect_to_flag_pkgs): + if expect_to_flag_pkgs: + with pytest.raises(aos_version.FoundMultiRelease) as e: + aos_version._check_multi_minor_release(pkgs, expected_pkgs) + assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs) + else: + aos_version._check_multi_minor_release(pkgs, expected_pkgs) diff --git a/roles/openshift_health_checker/test/conftest.py b/roles/openshift_health_checker/test/conftest.py index bf717ae85..3cbd65507 100644 --- a/roles/openshift_health_checker/test/conftest.py +++ b/roles/openshift_health_checker/test/conftest.py @@ -1,5 +1,11 @@ import os import sys -# extend sys.path so that tests can import openshift_checks -sys.path.insert(1, os.path.dirname(os.path.dirname(__file__))) +# extend sys.path so that tests can import openshift_checks and action plugins +# from this role. 
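+# (this is what allows the test modules to do `import aos_version` and
+# `from openshift_health_check import ActionModule`)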
+openshift_health_checker_path = os.path.dirname(os.path.dirname(__file__)) +sys.path[1:1] = [ + openshift_health_checker_path, + os.path.join(openshift_health_checker_path, 'action_plugins'), + os.path.join(openshift_health_checker_path, 'library'), +] diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py index 25385339a..f7e916a46 100644 --- a/roles/openshift_health_checker/test/package_availability_test.py +++ b/roles/openshift_health_checker/test/package_availability_test.py @@ -3,6 +3,20 @@ import pytest from openshift_checks.package_availability import PackageAvailability +@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [ + ('yum', False, True), + ('yum', True, False), + ('dnf', True, False), + ('dnf', False, False), +]) +def test_is_active(pkg_mgr, is_containerized, is_active): + task_vars = dict( + ansible_pkg_mgr=pkg_mgr, + openshift=dict(common=dict(is_containerized=is_containerized)), + ) + assert PackageAvailability.is_active(task_vars=task_vars) == is_active + + @pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [ ( dict(openshift=dict(common=dict(service_type='openshift'))), diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index cc1d263bc..196d9816a 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -1,21 +1,46 @@ +import pytest + from openshift_checks.package_version import PackageVersion def test_package_version(): task_vars = dict( openshift=dict(common=dict(service_type='origin')), - openshift_release='v3.5', + openshift_release='3.5', + openshift_deployment_type='origin', ) return_value = object() def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None): assert module_name == 'aos_version' - assert 'prefix' in module_args - assert 'version' in module_args - assert module_args['prefix'] == task_vars['openshift']['common']['service_type'] - assert module_args['version'] == task_vars['openshift_release'] + assert 'requested_openshift_release' in module_args + assert 'openshift_deployment_type' in module_args + assert 'rpm_prefix' in module_args + assert module_args['requested_openshift_release'] == task_vars['openshift_release'] + assert module_args['openshift_deployment_type'] == task_vars['openshift_deployment_type'] + assert module_args['rpm_prefix'] == task_vars['openshift']['common']['service_type'] return return_value check = PackageVersion(execute_module=execute_module) result = check.run(tmp=None, task_vars=task_vars) assert result is return_value + + +@pytest.mark.parametrize('group_names,is_containerized,is_active', [ + (['masters'], False, True), + # ensure check is skipped on containerized installs + (['masters'], True, False), + (['nodes'], False, True), + (['masters', 'nodes'], False, True), + (['masters', 'etcd'], False, True), + ([], False, False), + (['etcd'], False, False), + (['lb'], False, False), + (['nfs'], False, False), +]) +def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active): + task_vars = dict( + group_names=group_names, + openshift=dict(common=dict(is_containerized=is_containerized)), + ) + assert PackageVersion.is_active(task_vars=task_vars) == is_active diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index 
d73f339f7..596b36239 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -24,8 +24,9 @@ openshift_hosted_routers: ports: - 80:80 - 443:443 - certificates: "{{ openshift_hosted_router_certificate | default({}) }}" + certificates: "{{ openshift_hosted_router_certificates | default({}) }}" openshift_hosted_router_certificates: {} openshift_hosted_registry_cert_expire_days: 730 +openshift_hosted_router_create_certificate: False diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 0b8042473..6e691c26f 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -109,7 +109,7 @@ type: persistentVolumeClaim claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim" when: - - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack'] + - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs'] - name: Create OpenShift registry oc_adm_registry: @@ -123,3 +123,7 @@ volume_mounts: "{{ openshift_hosted_registry_volumes }}" edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in openshift_hosted_registry_force }}" + +- include: storage/glusterfs.yml + when: + - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml new file mode 100644 index 000000000..b18b24266 --- /dev/null +++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml @@ -0,0 +1,51 @@ +--- +- name: Wait for registry pods + oc_obj: + namespace: "{{ openshift_hosted_registry_namespace }}" + state: list + kind: pod + selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}" + register: registry_pods + until: + - "registry_pods.results.results[0]['items'] | count > 0" + # There must be as many matching pods with 'Ready' status True as there are expected replicas + - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int" + delay: 10 + retries: "{{ (600 / 10) | int }}" + +- name: Determine registry fsGroup + set_fact: + openshift_hosted_registry_fsgroup: "{{ registry_pods.results.results[0]['items'][0].spec.securityContext.fsGroup }}" + +- name: Create temp mount directory + command: mktemp -d /tmp/openshift-glusterfs-registry-XXXXXX + register: mktemp + changed_when: False + check_mode: no + +- name: Mount registry volume + mount: + state: mounted + fstype: glusterfs + src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}" + name: "{{ mktemp.stdout }}" + +- name: Set registry volume permissions + file: + dest: "{{ mktemp.stdout }}" + state: directory + group: "{{ openshift_hosted_registry_fsgroup }}" + mode: "2775" + recurse: True + +- name: Unmount registry volume + mount: + state: unmounted + name: "{{ mktemp.stdout }}" + +- name: Delete temp mount directory + file: + dest: "{{ mktemp.stdout }}" + state: absent + changed_when: False + check_mode: no diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 0861b9ec2..c71d0a34f 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ 
b/roles/openshift_hosted/tasks/router/router.yml @@ -14,6 +14,31 @@ openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}" openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}" +# This is for when we desire a cluster signed cert +# The certificate is generated and placed in master_config_dir/ +- block: + - name: generate a default wildcard router certificate + oc_adm_ca_server_cert: + signer_cert: "{{ openshift_master_config_dir }}/ca.crt" + signer_key: "{{ openshift_master_config_dir }}/ca.key" + signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" + hostnames: + - "{{ openshift_master_default_subdomain }}" + - "*.{{ openshift_master_default_subdomain }}" + cert: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}" + key: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.key') }}" + with_items: "{{ openshift_hosted_routers }}" + + - name: set the openshift_hosted_router_certificates + set_fact: + openshift_hosted_router_certificates: + certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" + keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" + cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}" + + # End Block + when: openshift_hosted_router_create_certificate + - name: Get the certificate contents for router copy: backup: True @@ -21,6 +46,7 @@ src: "{{ item }}" with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" + when: not openshift_hosted_router_create_certificate - name: Create the router service account(s) oc_serviceaccount: diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml index 9679d209a..92e68a0a3 100644 --- a/roles/openshift_logging/vars/openshift-enterprise.yml +++ b/roles/openshift_logging/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default(openshift_release | default ('3.5.0') ) }}" +__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('3.6.0') }}" diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index eef0f414e..155abd970 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -12,7 +12,7 @@ Requires=docker.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ 
openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api LimitNOFILE=131072 diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2 index be7644710..13381cd1a 100644 --- a/roles/openshift_master/templates/master_docker/master.docker.service.j2 +++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2 @@ -8,7 +8,7 @@ Wants=etcd_container.service [Service] EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master Restart=always diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh deleted file mode 100755 index f977b6dd6..000000000 --- a/roles/openshift_metrics/files/import_jks_certs.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -set -ex - -function import_certs() { - dir=$CERT_DIR - hawkular_metrics_keystore_password=$(echo $METRICS_KEYSTORE_PASSWD | base64 --decode) - hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 --decode) - hawkular_alias=`keytool -noprompt -list -keystore $dir/hawkular-metrics.truststore -storepass ${hawkular_metrics_truststore_password} | sed -n '7~2s/,.*$//p'` - - if [ ! -f $dir/hawkular-metrics.keystore ]; then - echo "Creating the Hawkular Metrics keystore from the PEM file" - keytool -importkeystore -v \ - -srckeystore $dir/hawkular-metrics.pkcs12 \ - -destkeystore $dir/hawkular-metrics.keystore \ - -srcstoretype PKCS12 \ - -deststoretype JKS \ - -srcstorepass $hawkular_metrics_keystore_password \ - -deststorepass $hawkular_metrics_keystore_password - fi - - cert_alias_names=(ca metricca) - - for cert_alias in ${cert_alias_names[*]}; do - if [[ ! ${hawkular_alias[*]} =~ "$cert_alias" ]]; then - echo "Importing the CA Certificate with alias $cert_alias into the Hawkular Metrics Truststore" - keytool -noprompt -import -v -trustcacerts -alias $cert_alias \ - -file ${dir}/ca.crt \ - -keystore $dir/hawkular-metrics.truststore \ - -trustcacerts \ - -storepass $hawkular_metrics_truststore_password - fi - done -} - -import_certs diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml index 01fc1ef64..07b7eca33 100644 --- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml @@ -13,21 +13,6 @@ hostnames: hawkular-cassandra changed_when: no -- slurp: src={{ mktemp.stdout }}/hawkular-metrics-truststore.pwd - register: hawkular_truststore_password - -- stat: path="{{mktemp.stdout}}/{{item}}" - register: pwd_file_stat - with_items: - - hawkular-metrics.pwd - - hawkular-metrics.htpasswd - changed_when: no - -- set_fact: - pwd_files: "{{pwd_files | default({}) | combine ({item.item: item.stat}) }}" - with_items: "{{pwd_file_stat.results}}" - changed_when: no - - name: generate password for hawkular metrics local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}" with_items: @@ -47,8 +32,6 @@ - hawkular-metrics.pwd - hawkular-metrics.htpasswd -- include: import_jks_certs.yaml - - name: read files for the hawkular-metrics secret shell: > printf '%s: ' '{{ item }}' @@ -56,13 +39,11 @@ register: hawkular_secrets with_items: - ca.crt - - hawkular-metrics.crt - - hawkular-metrics.keystore - - hawkular-metrics-keystore.pwd - - hawkular-metrics.truststore - - hawkular-metrics-truststore.pwd - hawkular-metrics.pwd - hawkular-metrics.htpasswd + - hawkular-metrics.crt + - hawkular-metrics.key + - hawkular-metrics.pem - hawkular-cassandra.crt - hawkular-cassandra.key - hawkular-cassandra.pem @@ -73,42 +54,23 @@ {{ hawkular_secrets.results|map(attribute='stdout')|join(' ')|from_yaml }} -- name: generate 
hawkular-metrics-secrets secret template - template: - src: secret.j2 - dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_secrets.yaml" - vars: - name: hawkular-metrics-secrets - labels: - metrics-infra: hawkular-metrics - data: - hawkular-metrics.keystore: > - {{ hawkular_secrets['hawkular-metrics.keystore'] }} - hawkular-metrics.keystore.password: > - {{ hawkular_secrets['hawkular-metrics-keystore.pwd'] }} - hawkular-metrics.truststore: > - {{ hawkular_secrets['hawkular-metrics.truststore'] }} - hawkular-metrics.truststore.password: > - {{ hawkular_secrets['hawkular-metrics-truststore.pwd'] }} - hawkular-metrics.keystore.alias: "{{ 'hawkular-metrics'|b64encode }}" - hawkular-metrics.htpasswd.file: > - {{ hawkular_secrets['hawkular-metrics.htpasswd'] }} - when: name not in metrics_secrets.stdout_lines - changed_when: no - -- name: generate hawkular-metrics-certificate secret template +- name: generate hawkular-metrics-certs secret template template: src: secret.j2 - dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_certificate.yaml" + dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-certs.yaml" vars: - name: hawkular-metrics-certificate + name: hawkular-metrics-certs labels: - metrics-infra: hawkular-metrics + metrics-infra: hawkular-metrics-certs + annotations: + service.alpha.openshift.io/originating-service-name: hawkular-metrics data: - hawkular-metrics.certificate: > + tls.crt: > {{ hawkular_secrets['hawkular-metrics.crt'] }} - hawkular-metrics-ca.certificate: > - {{ hawkular_secrets['ca.crt'] }} + tls.key: > + {{ hawkular_secrets['hawkular-metrics.key'] }} + tls.truststore.crt: > + {{ hawkular_secrets['hawkular-cassandra.crt'] }} when: name not in metrics_secrets.stdout_lines changed_when: no @@ -122,6 +84,7 @@ metrics-infra: hawkular-metrics data: hawkular-metrics.username: "{{ 'hawkular'|b64encode }}" + hawkular-metrics.htpasswd: "{{ hawkular_secrets['hawkular-metrics.htpasswd'] }}" hawkular-metrics.password: > {{ hawkular_secrets['hawkular-metrics.pwd'] }} when: name not in metrics_secrets.stdout_lines diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml deleted file mode 100644 index e098145e9..000000000 --- a/roles/openshift_metrics/tasks/import_jks_certs.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- stat: path="{{mktemp.stdout}}/hawkular-metrics.keystore" - register: metrics_keystore - check_mode: no - -- stat: path="{{mktemp.stdout}}/hawkular-metrics.truststore" - register: metrics_truststore - check_mode: no - -- block: - - slurp: src={{ mktemp.stdout }}/hawkular-metrics-keystore.pwd - register: metrics_keystore_password - - - fetch: - dest: "{{local_tmp.stdout}}/" - src: "{{ mktemp.stdout }}/{{item}}" - flat: yes - changed_when: False - with_items: - - hawkular-metrics.pkcs12 - - hawkular-metrics.crt - - ca.crt - - - local_action: command {{role_path}}/files/import_jks_certs.sh - environment: - CERT_DIR: "{{local_tmp.stdout}}" - METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}" - METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}" - changed_when: False - - - copy: - dest: "{{mktemp.stdout}}/" - src: "{{item}}" - with_fileglob: "{{local_tmp.stdout}}/*.*store" - - when: not metrics_keystore.stat.exists or - not metrics_truststore.stat.exists diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml index c490bcdd3..8d27c4930 100644 --- a/roles/openshift_metrics/tasks/install_heapster.yaml +++ 
b/roles/openshift_metrics/tasks/install_heapster.yaml @@ -20,7 +20,7 @@ - set_fact: heapster_sa_secrets: "{{ heapster_sa_secrets + [item] }}" with_items: - - hawkular-metrics-certificate + - hawkular-metrics-certs - hawkular-metrics-account when: "not {{ openshift_metrics_heapster_standalone | bool }}" diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 index 361378df3..401db4e58 100644 --- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 @@ -40,24 +40,20 @@ spec: - "-Dhawkular.metrics.cassandra.nodes=hawkular-cassandra" - "-Dhawkular.metrics.cassandra.use-ssl" - "-Dhawkular.metrics.openshift.auth-methods=openshift-oauth,htpasswd" - - "-Dhawkular.metrics.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file" + - "-Dhawkular.metrics.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd" - "-Dhawkular.metrics.allowed-cors-access-control-allow-headers=authorization" - "-Dhawkular.metrics.default-ttl={{openshift_metrics_duration}}" - "-Dhawkular.metrics.admin-tenant=_hawkular_admin" - "-Dhawkular-alerts.cassandra-nodes=hawkular-cassandra" - "-Dhawkular-alerts.cassandra-use-ssl" - "-Dhawkular.alerts.openshift.auth-methods=openshift-oauth,htpasswd" - - "-Dhawkular.alerts.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file" + - "-Dhawkular.alerts.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd" - "-Dhawkular.alerts.allowed-cors-access-control-allow-headers=authorization" - "-Dorg.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true" - "-Dorg.apache.catalina.connector.CoyoteAdapter.ALLOW_BACKSLASH=true" - "-Dcom.datastax.driver.FORCE_NIO=true" - "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}" - "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}" - - "--hmw.keystore=/secrets/hawkular-metrics.keystore" - - "--hmw.truststore=/secrets/hawkular-metrics.truststore" - - "--hmw.keystore_password_file=/secrets/hawkular-metrics.keystore.password" - - "--hmw.truststore_password_file=/secrets/hawkular-metrics.truststore.password" env: - name: POD_NAMESPACE valueFrom: @@ -67,6 +63,8 @@ spec: value: "{{ openshift_metrics_master_url }}" - name: JGROUPS_PASSWORD value: "{{ 17 | oo_random_word }}" + - name: TRUSTSTORE_AUTHORITIES + value: "/hawkular-metrics-certs/tls.truststore.crt" - name: OPENSHIFT_KUBE_PING_NAMESPACE valueFrom: fieldRef: @@ -76,10 +74,10 @@ spec: - name: STARTUP_TIMEOUT value: "{{ openshift_metrics_startup_timeout }}" volumeMounts: - - name: hawkular-metrics-secrets - mountPath: "/secrets" - - name: hawkular-metrics-client-secrets - mountPath: "/client-secrets" + - name: hawkular-metrics-certs + mountPath: "/hawkular-metrics-certs" + - name: hawkular-metrics-account + mountPath: "/hawkular-account" {% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none) or (openshift_metrics_hawkular_limits_memory is defined and openshift_metrics_hawkular_limits_memory is not none) or (openshift_metrics_hawkular_requests_cpu is defined and openshift_metrics_hawkular_requests_cpu is not none) @@ -118,9 +116,9 @@ spec: command: - "/opt/hawkular/scripts/hawkular-metrics-liveness.py" volumes: - - name: hawkular-metrics-secrets + - name: hawkular-metrics-certs secret: - secretName: hawkular-metrics-secrets - - name: hawkular-metrics-client-secrets + secretName: hawkular-metrics-certs + - name: hawkular-metrics-account secret: 
secretName: hawkular-metrics-account diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2 index 7c837db4d..f01ccfd58 100644 --- a/roles/openshift_metrics/templates/heapster.j2 +++ b/roles/openshift_metrics/templates/heapster.j2 @@ -43,15 +43,15 @@ spec: - "--wrapper.username_file=/hawkular-account/hawkular-metrics.username" - "--wrapper.password_file=/hawkular-account/hawkular-metrics.password" - "--wrapper.endpoint_check=https://hawkular-metrics:443/hawkular/metrics/status" - - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-cert/hawkular-metrics-ca.certificate&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)" + - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-metrics-certs/tls.crt&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)" {% endif %} env: - name: STARTUP_TIMEOUT value: "{{ openshift_metrics_startup_timeout }}" -{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none) +{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none) or (openshift_metrics_heapster_limits_memory is defined and openshift_metrics_heapster_limits_memory is not none) or (openshift_metrics_heapster_requests_cpu is defined and openshift_metrics_heapster_requests_cpu is not none) - or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none)) + or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none)) %} resources: {% if (openshift_metrics_heapster_limits_cpu is not none @@ -65,8 +65,8 @@ spec: memory: "{{openshift_metrics_heapster_limits_memory}}" {% endif %} {% endif %} -{% if (openshift_metrics_heapster_requests_cpu is not none - or openshift_metrics_heapster_requests_memory is not none) +{% if (openshift_metrics_heapster_requests_cpu is not none + or openshift_metrics_heapster_requests_memory is not none) %} requests: {% if openshift_metrics_heapster_requests_cpu is not none %} @@ -81,8 +81,8 @@ spec: - name: heapster-secrets mountPath: "/secrets" {% if not openshift_metrics_heapster_standalone %} - - name: hawkular-metrics-certificate - mountPath: "/hawkular-cert" + - name: hawkular-metrics-certs + mountPath: "/hawkular-metrics-certs" - name: hawkular-metrics-account mountPath: "/hawkular-account" readinessProbe: @@ -95,9 +95,9 @@ spec: secret: secretName: heapster-secrets {% if not openshift_metrics_heapster_standalone %} - - name: hawkular-metrics-certificate + - name: hawkular-metrics-certs secret: - secretName: hawkular-metrics-certificate + secretName: hawkular-metrics-certs - name: hawkular-metrics-account secret: secretName: hawkular-metrics-account diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index f28c3ce48..b20957550 100644 --- a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@ --- __openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" -__openshift_metrics_image_version: "{{ 
openshift_hosted_metrics_deployer_version | default(openshift_release | default ('3.5.0') ) }}" +__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('3.6.0') }}" diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index fffbf2994..bd95f8526 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -6,10 +6,6 @@ os_firewall_allow: port: 80/tcp - service: https port: 443/tcp -- service: Openshift kubelet ReadOnlyPort - port: 10255/tcp -- service: Openshift kubelet ReadOnlyPort udp - port: 10255/udp - service: OpenShift OVS sdn port: 4789/udp when: openshift.node.use_openshift_sdn | bool diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index c97ff1b4b..0da41d0c1 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -26,10 +26,6 @@ dependencies: port: 80/tcp - service: https port: 443/tcp - - service: Openshift kubelet ReadOnlyPort - port: 10255/tcp - - service: Openshift kubelet ReadOnlyPort udp - port: 10255/udp - role: os_firewall os_firewall_allow: - service: OpenShift OVS sdn diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 626248306..59003bbf9 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -34,6 +34,33 @@ dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}" env_vars: "{{ openshift_node_env_vars | default(None) }}" +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +- name: Check for swap usage + command: grep "^[^#].*swap" /etc/fstab + # grep: match any lines which don't begin with '#' and contain 'swap' + # command: swapon --summary + # Alternate option, however if swap entries are in fstab, swap will be + # enabled at boot. Grepping fstab should catch a condition when swap was + # disabled, but the fstab entries were not removed. + changed_when: false + failed_when: false + register: swap_result + + # Disable Swap Block +- block: + + - name: Disable swap + command: swapoff --all + + - name: Remove swap entries from /etc/fstab + lineinfile: + dest: /etc/fstab + regexp: 'swap' + state: absent + + when: swap_result.stdout_lines | length > 0 + # End Disable Swap Block + # We have to add tuned-profiles in the same transaction otherwise we run into depsolving # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. 
- name: Install Node package diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml index cd2f362aa..2a36d8945 100644 --- a/roles/openshift_node_upgrade/meta/main.yml +++ b/roles/openshift_node_upgrade/meta/main.yml @@ -10,4 +10,5 @@ galaxy_info: versions: - 7 dependencies: +- role: lib_utils - role: openshift_common diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index 6ae8dbc12..01bd3bf38 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -84,6 +84,33 @@ value: "{{ oreg_url }}" when: oreg_url is defined +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +- name: Check for swap usage + command: grep "^[^#].*swap" /etc/fstab + # grep: match any lines which don't begin with '#' and contain 'swap' + # command: swapon --summary + # Alternate option, however if swap entries are in fstab, swap will be + # enabled at boot. Grepping fstab should catch a condition when swap was + # disabled, but the fstab entries were not removed. + changed_when: false + failed_when: false + register: swap_result + + # Disable Swap Block +- block: + + - name: Disable swap + command: swapoff --all + + - name: Remove swap entries from /etc/fstab + lineinfile: + dest: /etc/fstab + regexp: 'swap' + state: absent + + when: swap_result.stdout_lines | length > 0 + # End Disable Swap Block + - name: Restart rpm node service service: name: "{{ openshift.common.service_type }}-node" diff --git a/roles/openshift_provisioners/README.md b/roles/openshift_provisioners/README.md new file mode 100644 index 000000000..7449073e6 --- /dev/null +++ b/roles/openshift_provisioners/README.md @@ -0,0 +1,29 @@ +# OpenShift External Dynamic Provisioners + +## Required Vars +* `openshift_provisioners_install_provisioners`: When `True` the openshift_provisioners role will install provisioners that have their "master" var (e.g. `openshift_provisioners_efs`) set `True`. When `False` will uninstall provisioners that have their var set `True`. + +## Optional Vars +* `openshift_provisioners_image_prefix`: The prefix for the provisioner images to use. Defaults to 'docker.io/openshift/origin-'. +* `openshift_provisioners_image_version`: The image version for the provisioner images to use. Defaults to 'latest'. +* `openshift_provisioners_project`: The namespace that provisioners will be installed in. Defaults to 'openshift-infra'. + +## AWS EFS + +### Prerequisites +* An IAM user assigned the AmazonElasticFileSystemReadOnlyAccess policy (or better) +* An EFS file system in your cluster's region +* [Mount targets](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) and [security groups](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs-create-security-groups.html) such that any node (in any zone in the cluster's region) can mount the EFS file system by its [File system DNS name](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html) + +### Required Vars +* `openshift_provisioners_efs_fsid`: The [File system ID](http://docs.aws.amazon.com/efs/latest/ug/gs-step-two-create-efs-resources.html) of the EFS file system, e.g. fs-47a2c22e. +* `openshift_provisioners_efs_region`: The Amazon EC2 region of the EFS file system. +* `openshift_provisioners_efs_aws_access_key_id`: The AWS access key of the IAM user, used to check that the EFS file system specified actually exists. 
+* `openshift_provisioners_efs_aws_secret_access_key`: The AWS secret access key of the IAM user, used to check that the EFS file system specified actually exists. + +### Optional Vars +* `openshift_provisioners_efs`: When `True` the AWS EFS provisioner will be installed or uninstalled according to whether `openshift_provisioners_install_provisioners` is `True` or `False`, respectively. Defaults to `False`. +* `openshift_provisioners_efs_path`: The path of the directory in the EFS file system in which the EFS provisioner will create a directory to back each PV it creates. It must exist and be mountable by the EFS provisioner. Defaults to '/persistentvolumes'. +* `openshift_provisioners_efs_name`: The `provisioner` name that `StorageClasses` specify. Defaults to 'openshift.org/aws-efs'. +* `openshift_provisioners_efs_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land. +* `openshift_provisioners_efs_supplementalgroup`: The supplemental group to give the pod in case it is needed for permission to write to the EFS file system. Defaults to '65534'. diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml new file mode 100644 index 000000000..a6f040831 --- /dev/null +++ b/roles/openshift_provisioners/defaults/main.yaml @@ -0,0 +1,12 @@ +--- +openshift_provisioners_install_provisioners: True +openshift_provisioners_image_prefix: docker.io/openshift/origin- +openshift_provisioners_image_version: latest + +openshift_provisioners_efs: False +openshift_provisioners_efs_path: /persistentvolumes +openshift_provisioners_efs_name: openshift.org/aws-efs +openshift_provisioners_efs_nodeselector: "" +openshift_provisioners_efs_supplementalgroup: '65534' + +openshift_provisioners_project: openshift-infra diff --git a/roles/openshift_provisioners/meta/main.yaml b/roles/openshift_provisioners/meta/main.yaml new file mode 100644 index 000000000..cb9278eb7 --- /dev/null +++ b/roles/openshift_provisioners/meta/main.yaml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: OpenShift Red Hat + description: OpenShift Provisioners + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 2.2 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml new file mode 100644 index 000000000..ac21a5e37 --- /dev/null +++ b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml @@ -0,0 +1,19 @@ +--- +- name: Generate ClusterRoleBindings + template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-clusterrolebinding.yaml + vars: + acct_name: provisioners-{{item}} + obj_name: run-provisioners-{{item}} + labels: + provisioners-infra: support + crb_usernames: ["system:serviceaccount:{{openshift_provisioners_project}}:{{acct_name}}"] + subjects: + - kind: ServiceAccount + name: "{{acct_name}}" + namespace: "{{openshift_provisioners_project}}" + cr_name: "system:persistent-volume-provisioner" + with_items: + # TODO + - efs + check_mode: no + changed_when: no diff --git a/roles/openshift_provisioners/tasks/generate_secrets.yaml b/roles/openshift_provisioners/tasks/generate_secrets.yaml new file mode 100644 index 000000000..e6cbb1bbf --- /dev/null +++ b/roles/openshift_provisioners/tasks/generate_secrets.yaml @@ -0,0 +1,14 @@ +--- +- name: Generate secret for efs + template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-secret.yaml + vars: + name: efs + obj_name: "provisioners-efs" + labels: + provisioners-infra: support + secrets: + - {key: aws-access-key-id, value: "{{openshift_provisioners_efs_aws_access_key_id}}"} + - {key: aws-secret-access-key, value: "{{openshift_provisioners_efs_aws_secret_access_key}}"} + check_mode: no + changed_when: no + when: openshift_provisioners_efs | bool diff --git a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml new file mode 100644 index 000000000..4fe0583ee --- /dev/null +++ b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml @@ -0,0 +1,12 @@ +--- +- name: Generating serviceaccounts + template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-sa.yaml + vars: + obj_name: provisioners-{{item}} + labels: + provisioners-infra: support + with_items: + # TODO + - efs + check_mode: no + changed_when: no diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml new file mode 100644 index 000000000..57279c665 --- /dev/null +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -0,0 +1,70 @@ +--- +- name: Check efs current replica count + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs + -o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}} + register: efs_replica_count + when: not ansible_check_mode + ignore_errors: yes + changed_when: no + +- name: Generate efs PersistentVolumeClaim + template: src=pvc.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pvc.yaml + vars: + obj_name: "provisioners-efs" + size: "1Mi" + access_modes: + - "ReadWriteMany" + pv_selector: + provisioners-efs: efs + check_mode: no + changed_when: no + +- name: Generate efs PersistentVolume + template: src=pv.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pv.yaml + vars: + obj_name: "provisioners-efs" + size: "1Mi" + access_modes: + - "ReadWriteMany" + labels: + provisioners-efs: efs + 
volume_plugin: "nfs" + volume_source: + - {key: "server", value: "{{openshift_provisioners_efs_fsid}}.efs.{{openshift_provisioners_efs_region}}.amazonaws.com"} + - {key: "path", value: "{{openshift_provisioners_efs_path}}"} + claim_name: "provisioners-efs" + check_mode: no + changed_when: no + +- name: Generate efs DeploymentConfig + template: + src: efs.j2 + dest: "{{ mktemp.stdout }}/templates/{{deploy_name}}-dc.yaml" + vars: + name: efs + deploy_name: "provisioners-efs" + deploy_serviceAccount: "provisioners-efs" + replica_count: "{{efs_replica_count.stdout | default(0)}}" + node_selector: "{{openshift_provisioners_efs_nodeselector | default('') }}" + claim_name: "provisioners-efs" + check_mode: no + changed_when: false + +# anyuid in order to run as root & chgrp shares with allocated gids +- name: "Check efs anyuid permissions" + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + get scc/anyuid -o jsonpath='{.users}' + register: efs_anyuid + check_mode: no + changed_when: no + +- name: "Set anyuid permissions for efs" + command: > + {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy + add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs + register: efs_output + failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr" + check_mode: no + when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1 diff --git a/roles/openshift_provisioners/tasks/install_provisioners.yaml b/roles/openshift_provisioners/tasks/install_provisioners.yaml new file mode 100644 index 000000000..324fdcc82 --- /dev/null +++ b/roles/openshift_provisioners/tasks/install_provisioners.yaml @@ -0,0 +1,55 @@ +--- +- name: Check that EFS File System ID is set + fail: msg='the openshift_provisioners_efs_fsid variable is required' + when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_fsid is not defined + +- name: Check that EFS region is set + fail: msg='the openshift_provisioners_efs_region variable is required' + when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_region is not defined + +- name: Check that EFS AWS access key id is set + fail: msg='the openshift_provisioners_efs_aws_access_key_id variable is required' + when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_access_key_id is not defined + +- name: Check that EFS AWS secret access key is set + fail: msg='the openshift_provisioners_efs_aws_secret_access_key variable is required' + when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined + +- name: Install support + include: install_support.yaml + +- name: Install EFS + include: install_efs.yaml + when: openshift_provisioners_efs | bool + +- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml + register: object_def_files + changed_when: no + +- slurp: src={{item}} + register: object_defs + with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}" + changed_when: no + +- name: Create objects + include: oc_apply.yaml + vars: + - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" + - namespace: "{{ openshift_provisioners_project }}" + - file_name: "{{ file.source }}" + - file_content: "{{ file.content | b64decode | from_yaml }}" + with_items: "{{ object_defs.results }}" + loop_control: + loop_var: file + when: not ansible_check_mode + +- name: Printing out objects to create + 
debug: msg={{file.content | b64decode }} + with_items: "{{ object_defs.results }}" + loop_control: + loop_var: file + when: ansible_check_mode + +- name: Scaling up cluster + include: start_cluster.yaml + when: start_cluster | default(true) | bool diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml new file mode 100644 index 000000000..ba472f1c9 --- /dev/null +++ b/roles/openshift_provisioners/tasks/install_support.yaml @@ -0,0 +1,24 @@ +--- +- name: Check for provisioners project already exists + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_provisioners_project}} --no-headers + register: provisioners_project_result + ignore_errors: yes + when: not ansible_check_mode + changed_when: no + +- name: Create provisioners project + command: > + {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_provisioners_project}} + when: not ansible_check_mode and "not found" in provisioners_project_result.stderr + +- name: Create temp directory for all our templates + file: path={{mktemp.stdout}}/templates state=directory mode=0755 + changed_when: False + check_mode: no + +- include: generate_secrets.yaml + +- include: generate_clusterrolebindings.yaml + +- include: generate_serviceaccounts.yaml diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml new file mode 100644 index 000000000..a50c78c97 --- /dev/null +++ b/roles/openshift_provisioners/tasks/main.yaml @@ -0,0 +1,27 @@ +--- +- name: Create temp directory for doing work in + command: mktemp -td openshift-provisioners-ansible-XXXXXX + register: mktemp + changed_when: False + check_mode: no + +- name: Copy the admin client config(s) + command: > + cp {{ openshift.common.config_base}}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: False + check_mode: no + tags: provisioners_init + +- include: "{{ role_path }}/tasks/install_provisioners.yaml" + when: openshift_provisioners_install_provisioners | default(false) | bool + +- include: "{{ role_path }}/tasks/uninstall_provisioners.yaml" + when: not openshift_provisioners_install_provisioners | default(false) | bool + +- name: Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + tags: provisioners_cleanup + changed_when: False + check_mode: no diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml new file mode 100644 index 000000000..49d03f203 --- /dev/null +++ b/roles/openshift_provisioners/tasks/oc_apply.yaml @@ -0,0 +1,51 @@ +--- +- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + get {{file_content.kind}} {{file_content.metadata.name}} + -o jsonpath='{.metadata.resourceVersion}' + -n {{namespace}} + register: generation_init + failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''" + changed_when: no + +- name: Applying {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + apply -f {{ file_name }} + -n {{ namespace }} + register: generation_apply + failed_when: "'error' in generation_apply.stderr" + changed_when: no + +- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} + command: > + {{ openshift.common.client_binary }} 
--config={{ kubeconfig }} + get {{file_content.kind}} {{file_content.metadata.name}} + -o jsonpath='{.metadata.resourceVersion}' + -n {{namespace}} + register: generation_changed + failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''" + changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int + when: + - "'field is immutable' not in generation_apply.stderr" + +- name: Removing previous {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + delete -f {{ file_name }} + -n {{ namespace }} + register: generation_delete + failed_when: "'error' in generation_delete.stderr" + changed_when: generation_delete.rc == 0 + when: generation_apply.rc != 0 + +- name: Recreating {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + apply -f {{ file_name }} + -n {{ namespace }} + register: generation_apply + failed_when: "'error' in generation_apply.stderr" + changed_when: generation_apply.rc == 0 + when: generation_apply.rc != 0 diff --git a/roles/openshift_provisioners/tasks/start_cluster.yaml b/roles/openshift_provisioners/tasks/start_cluster.yaml new file mode 100644 index 000000000..ee7f545a9 --- /dev/null +++ b/roles/openshift_provisioners/tasks/start_cluster.yaml @@ -0,0 +1,20 @@ +--- +- name: Retrieve efs + oc_obj: + state: list + kind: dc + selector: "provisioners-infra=efs" + namespace: "{{openshift_provisioners_project}}" + register: efs_dc + when: openshift_provisioners_efs | bool + +- name: start efs + oc_scale: + kind: dc + name: "{{ object }}" + namespace: "{{openshift_provisioners_project}}" + replicas: 1 + with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" + loop_control: + loop_var: object + when: openshift_provisioners_efs | bool diff --git a/roles/openshift_provisioners/tasks/stop_cluster.yaml b/roles/openshift_provisioners/tasks/stop_cluster.yaml new file mode 100644 index 000000000..30b6b12c8 --- /dev/null +++ b/roles/openshift_provisioners/tasks/stop_cluster.yaml @@ -0,0 +1,20 @@ +--- +- name: Retrieve efs + oc_obj: + state: list + kind: dc + selector: "provisioners-infra=efs" + namespace: "{{openshift_provisioners_project}}" + register: efs_dc + when: openshift_provisioners_efs | bool + +- name: stop efs + oc_scale: + kind: dc + name: "{{ object }}" + namespace: "{{openshift_provisioners_project}}" + replicas: 0 + with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}" + loop_control: + loop_var: object + when: openshift_provisioners_efs | bool diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml new file mode 100644 index 000000000..0be4bc7d2 --- /dev/null +++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml @@ -0,0 +1,43 @@ +--- +- name: stop provisioners + include: stop_cluster.yaml + +# delete the deployment objects that we had created +- name: delete provisioner api objects + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true + with_items: + - dc + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 + +# delete our old secrets +- name: delete provisioner secrets + command: > + {{ openshift.common.client_binary }} 
--config={{ mktemp.stdout }}/admin.kubeconfig + delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true + with_items: + - provisioners-efs + ignore_errors: yes + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 + +# delete cluster role bindings +- name: delete cluster role bindings + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true + with_items: + - run-provisioners-efs + register: delete_result + changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 + +# delete our service accounts +- name: delete service accounts + oc_serviceaccount: + name: "{{ item }}" + namespace: "{{ openshift_provisioners_project }}" + state: absent + with_items: + - provisioners-efs diff --git a/roles/openshift_provisioners/templates/clusterrolebinding.j2 b/roles/openshift_provisioners/templates/clusterrolebinding.j2 new file mode 100644 index 000000000..994afa32d --- /dev/null +++ b/roles/openshift_provisioners/templates/clusterrolebinding.j2 @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ClusterRoleBinding +metadata: + name: {{obj_name}} +{% if labels is defined%} + labels: +{% for key, value in labels.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +{% if crb_usernames is defined %} +userNames: +{% for name in crb_usernames %} + - {{ name }} +{% endfor %} +{% endif %} +{% if crb_groupnames is defined %} +groupNames: +{% for name in crb_groupnames %} + - {{ name }} +{% endfor %} +{% endif %} +subjects: +{% for sub in subjects %} + - kind: {{ sub.kind }} + name: {{ sub.name }} + namespace: {{sub.namespace}} +{% endfor %} +roleRef: + name: {{cr_name}} diff --git a/roles/openshift_provisioners/templates/efs.j2 b/roles/openshift_provisioners/templates/efs.j2 new file mode 100644 index 000000000..81b9ccca5 --- /dev/null +++ b/roles/openshift_provisioners/templates/efs.j2 @@ -0,0 +1,58 @@ +kind: DeploymentConfig +apiVersion: v1 +metadata: + name: "{{deploy_name}}" + labels: + provisioners-infra: "{{name}}" + name: "{{name}}" +spec: + replicas: {{replica_count}} + selector: + provisioners-infra: "{{name}}" + name: "{{name}}" + strategy: + type: Recreate + template: + metadata: + name: "{{deploy_name}}" + labels: + provisioners-infra: "{{name}}" + name: "{{name}}" + spec: + serviceAccountName: "{{deploy_serviceAccount}}" +{% if node_selector is iterable and node_selector | length > 0 %} + nodeSelector: +{% for key, value in node_selector.iteritems() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - name: efs-provisioner + image: {{openshift_provisioners_image_prefix}}efs-provisioner:{{openshift_provisioners_image_version}} + env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: provisioners-efs + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: provisioners-efs + key: aws-secret-access-key + - name: FILE_SYSTEM_ID + value: "{{openshift_provisioners_efs_fsid}}" + - name: AWS_REGION + value: "{{openshift_provisioners_efs_region}}" + - name: PROVISIONER_NAME + value: "{{openshift_provisioners_efs_name}}" + volumeMounts: + - name: pv-volume + mountPath: /persistentvolumes + securityContext: + supplementalGroups: + - {{openshift_provisioners_efs_supplementalgroup}} + volumes: + - name: pv-volume + persistentVolumeClaim: + claimName: "{{claim_name}}" diff --git 
a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2 new file mode 100644 index 000000000..f4128f9f0 --- /dev/null +++ b/roles/openshift_provisioners/templates/pv.j2 @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{obj_name}} +{% if annotations is defined %} + annotations: +{% for key,value in annotations.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +{% if labels is defined%} + labels: +{% for key, value in labels.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + capacity: + storage: {{size}} + accessModes: +{% for mode in access_modes %} + - {{mode}} +{% endfor %} + {{volume_plugin}}: +{% for s in volume_source %} + {{s.key}}: {{s.value}} +{% endfor %} +{% if claim_name is defined%} + claimRef: + name: {{claim_name}} + namespace: {{openshift_provisioners_project}} +{% endif %} diff --git a/roles/openshift_provisioners/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2 new file mode 100644 index 000000000..83d503056 --- /dev/null +++ b/roles/openshift_provisioners/templates/pvc.j2 @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{obj_name}} +{% if annotations is defined %} + annotations: +{% for key,value in annotations.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: +{% if pv_selector is defined and pv_selector is mapping %} + selector: + matchLabels: +{% for key,value in pv_selector.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} + accessModes: +{% for mode in access_modes %} + - {{mode}} +{% endfor %} + resources: + requests: + storage: {{size}} + diff --git a/roles/openshift_provisioners/templates/secret.j2 b/roles/openshift_provisioners/templates/secret.j2 new file mode 100644 index 000000000..78824095b --- /dev/null +++ b/roles/openshift_provisioners/templates/secret.j2 @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{obj_name}} +{% if labels is defined%} + labels: +{% for key, value in labels.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +type: Opaque +data: +{% for s in secrets %} + "{{s.key}}" : "{{s.value | b64encode}}" +{% endfor %} diff --git a/roles/openshift_provisioners/templates/serviceaccount.j2 b/roles/openshift_provisioners/templates/serviceaccount.j2 new file mode 100644 index 000000000..b22acc594 --- /dev/null +++ b/roles/openshift_provisioners/templates/serviceaccount.j2 @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{obj_name}} +{% if labels is defined%} + labels: +{% for key, value in labels.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +{% if secrets is defined %} +secrets: +{% for name in secrets %} +- name: {{ name }} +{% endfor %} +{% endif %} diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md index 95b155b29..abd1997dd 100644 --- a/roles/openshift_repos/README.md +++ b/roles/openshift_repos/README.md @@ -12,10 +12,10 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos. 
Role Variables -------------- -| Name | Default value | | -|-------------------------------|---------------|----------------------------------------------| -| openshift_deployment_type | None | Possible values enterprise, origin, online | -| openshift_additional_repos | {} | TODO | +| Name | Default value | | +|-------------------------------|---------------|------------------------------------| +| openshift_deployment_type | None | Possible values enterprise, origin | +| openshift_additional_repos | {} | TODO | Dependencies ------------ diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index fc562c42c..f15dc16d1 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -1,6 +1,18 @@ --- +- name: Abort when conflicting deployment type variables are set + when: + - deployment_type is defined + - openshift_deployment_type is defined + - openshift_deployment_type != deployment_type + fail: + msg: |- + openshift_deployment_type is set to "{{ openshift_deployment_type }}". + deployment_type is set to "{{ deployment_type }}". + To avoid unexpected results, this conflict is not allowed. + deployment_type is deprecated in favor of openshift_deployment_type. + Please specify only openshift_deployment_type, or make both the same. + - name: Standardize on latest variable names - no_log: True # keep task description legible set_fact: # goal is to deprecate deployment_type in favor of openshift_deployment_type. # both will be accepted for now, but code should refer to the new name. @@ -8,8 +20,15 @@ deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" +- name: Abort when deployment type is invalid + # this variable is required; complain early and clearly if it is invalid. + when: openshift_deployment_type not in known_openshift_deployment_types + fail: + msg: |- + Please set openshift_deployment_type to one of: + {{ known_openshift_deployment_types | join(', ') }} + - name: Normalize openshift_release - no_log: True # keep task description legible set_fact: # Normalize release if provided, e.g. "v3.5" => "3.5" # Currently this is not required to be defined for all installs, and the @@ -19,10 +38,11 @@ openshift_release: "{{ openshift_release | string | regex_replace('^v', '') }}" when: openshift_release is defined -- name: Ensure a valid deployment type has been given. - # this variable is required; complain early and clearly if it is invalid. - when: openshift_deployment_type not in known_openshift_deployment_types +- name: Abort when openshift_release is invalid + when: + - openshift_release is defined + - not openshift_release | match('\d+(\.\d+){1,3}$') fail: msg: |- - Please set openshift_deployment_type to one of: - {{ known_openshift_deployment_types | join(', ') }} + openshift_release is "{{ openshift_release }}" which is not a valid version string. + Please set it to a version string like "3.4". 
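As an aside on the openshift_sanitize_inventory change above: the two tasks first strip a leading "v" from `openshift_release` (e.g. "v3.5" becomes "3.5") and then abort if the result is not a plain dotted version string. Below is a minimal, standalone Python sketch of that normalization and validation; it is not part of this patch, the function names are illustrative, and it assumes Jinja2's `match` test behaves like `re.match` (anchored at the start of the string).

```python
import re

# Pattern used by the "Abort when openshift_release is invalid" task above.
VERSION_RE = re.compile(r'\d+(\.\d+){1,3}$')


def normalize_release(openshift_release):
    # "Normalize openshift_release": strip a leading "v", e.g. "v3.5" -> "3.5".
    return re.sub(r'^v', '', str(openshift_release))


def validate_release(openshift_release):
    # Reject anything that is not a plain dotted version string like "3.4".
    if not VERSION_RE.match(openshift_release):
        raise ValueError(
            'openshift_release is "%s" which is not a valid version string. '
            'Please set it to a version string like "3.4".' % openshift_release
        )


if __name__ == '__main__':
    release = normalize_release('v3.5')  # -> "3.5"
    validate_release(release)            # passes
    # validate_release('3.5-beta')       # would raise ValueError
```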
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md new file mode 100644 index 000000000..cf0fb94c9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/README.md @@ -0,0 +1,60 @@ +OpenShift GlusterFS Cluster +=========================== + +OpenShift GlusterFS Cluster Installation + +Requirements +------------ + +* Ansible 2.2 + +Role Variables +-------------- + +From this role: + +| Name | Default value | | +|--------------------------------------------------|-------------------------|-----------------------------------------| +| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready +| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources +| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized +| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode +| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' +| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods +| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.** +| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized +| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7' +| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods +| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin +| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes +| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi +| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode +| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` + +Dependencies +------------ + +* os_firewall +* openshift_hosted_facts +* openshift_repos +* lib_openshift + +Example Playbook +---------------- + +``` +- name: Configure GlusterFS hosts + hosts: oo_first_master + roles: + - role: openshift_storage_glusterfs +``` + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Jose A. 
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
new file mode 100644
index 000000000..ade850747
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -0,0 +1,17 @@
+---
+openshift_storage_glusterfs_timeout: 300
+openshift_storage_glusterfs_namespace: 'default'
+openshift_storage_glusterfs_is_native: True
+openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_wipe: False
+openshift_storage_glusterfs_heketi_is_native: True
+openshift_storage_glusterfs_heketi_is_missing: True
+openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_version: 'latest'
+openshift_storage_glusterfs_heketi_admin_key: ''
+openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_topology_load: True
+openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
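The two image defaults above switch on the deployment type via an inline Jinja conditional. A tiny Python sketch of the equivalent selection, purely illustrative:

```python
def glusterfs_images(deployment_type):
    # Mirrors the conditionals in defaults/main.yml: enterprise deployments
    # get the RHGS images, everything else gets the community images.
    if deployment_type == 'openshift-enterprise':
        return 'rhgs3/rhgs-server-rhel7', 'rhgs3/rhgs-volmanager-rhel7'
    return 'gluster/gluster-centos', 'heketi/heketi'

assert glusterfs_images('origin') == ('gluster/gluster-centos', 'heketi/heketi')
```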
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
new file mode 100644
index 000000000..c9945be13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
@@ -0,0 +1,115 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: deploy-heketi
+  labels:
+    glusterfs: heketi-template
+    deploy-heketi: support
+  annotations:
+    description: Bootstrap Heketi installation
+    tags: glusterfs,heketi,installation
+labels:
+  template: deploy-heketi
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi
+    labels:
+      glusterfs: deploy-heketi-service
+      deploy-heketi: support
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: deploy-heketi
+      port: 8080
+      targetPort: 8080
+    selector:
+      name: deploy-heketi
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi
+    labels:
+      glusterfs: deploy-heketi-route
+      deploy-heketi: support
+  spec:
+    to:
+      kind: Service
+      name: deploy-heketi
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi
+    labels:
+      glusterfs: deploy-heketi-dc
+      deploy-heketi: support
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      name: deploy-heketi
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: deploy-heketi
+        labels:
+          name: deploy-heketi
+          glusterfs: deploy-heketi-pod
+          deploy-heketi: support
+      spec:
+        serviceAccountName: heketi-service-account
+        containers:
+        - name: deploy-heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: kubernetes
+          - name: HEKETI_FSTAB
+            value: /var/lib/heketi/fstab
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+  displayName: GlusterFS container name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+  required: True
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
new file mode 100644
index 000000000..3f8d8f507
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: glusterfs-registry-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
new file mode 100644
index 000000000..c66705752
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
@@ -0,0 +1,128 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterfs
+  labels:
+    glusterfs: template
+  annotations:
+    description: GlusterFS DaemonSet template
+    tags: glusterfs
+objects:
+- kind: DaemonSet
+  apiVersion: extensions/v1beta1
+  metadata:
+    name: glusterfs
+    labels:
+      glusterfs: daemonset
+    annotations:
+      description: GlusterFS DaemonSet
+      tags: glusterfs
+  spec:
+    selector:
+      matchLabels:
+        glusterfs-node: pod
+    template:
+      metadata:
+        name: glusterfs
+        labels:
+          glusterfs-node: pod
+      spec:
+        nodeSelector:
+          storagenode: glusterfs
+        hostNetwork: true
+        containers:
+        - name: glusterfs
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+          - name: glusterfs-heketi
+            mountPath: "/var/lib/heketi"
+          - name: glusterfs-run
+            mountPath: "/run"
+          - name: glusterfs-lvm
+            mountPath: "/run/lvm"
+          - name: glusterfs-etc
+            mountPath: "/etc/glusterfs"
+          - name: glusterfs-logs
+            mountPath: "/var/log/glusterfs"
+          - name: glusterfs-config
+            mountPath: "/var/lib/glusterd"
+          - name: glusterfs-dev
+            mountPath: "/dev"
+          - name: glusterfs-misc
+            mountPath: "/var/lib/misc/glusterfsd"
+          - name: glusterfs-cgroup
+            mountPath: "/sys/fs/cgroup"
+            readOnly: true
+          - name: glusterfs-ssl
+            mountPath: "/etc/ssl"
+            readOnly: true
+          securityContext:
+            capabilities: {}
+            privileged: true
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 100
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 10
+            successThreshold: 1
+            failureThreshold: 3
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 100
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 10
+            successThreshold: 1
+            failureThreshold: 3
+          resources: {}
+          terminationMessagePath: "/dev/termination-log"
+        volumes:
+        - name: glusterfs-heketi
+          hostPath:
+            path: "/var/lib/heketi"
+        - name: glusterfs-run
+          emptyDir: {}
+        - name: glusterfs-lvm
+          hostPath:
+            path: "/run/lvm"
+        - name: glusterfs-etc
+          hostPath:
+            path: "/etc/glusterfs"
+        - name: glusterfs-logs
+          hostPath:
+            path: "/var/log/glusterfs"
+        - name: glusterfs-config
+          hostPath:
+ path: "/var/lib/glusterd" + - name: glusterfs-dev + hostPath: + path: "/dev" + - name: glusterfs-misc + hostPath: + path: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: glusterfs-ssl + hostPath: + path: "/etc/ssl" + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} +parameters: +- name: IMAGE_NAME + displayName: GlusterFS container name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container versiona + required: True diff --git a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml new file mode 100644 index 000000000..df045c170 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml @@ -0,0 +1,113 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +labels: + template: heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi + labels: + glusterfs: heketi-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-pod +- kind: Route + apiVersion: v1 + metadata: + name: heketi + labels: + glusterfs: heketi-route + spec: + to: + kind: Service + name: heketi +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi + labels: + glusterfs: heketi-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi + labels: + glusterfs: heketi-pod + spec: + serviceAccountName: heketi-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: kubernetes + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-storage-endpoints + path: heketidbstorage +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: IMAGE_NAME + displayName: GlusterFS container name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container versiona + required: True diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py new file mode 100644 index 000000000..88801e487 --- /dev/null +++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py @@ -0,0 +1,23 @@ +''' + Openshift Storage GlusterFS class that provides useful 
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
new file mode 100644
index 000000000..aab9851f9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+  author: Jose A. Rivera
+  description: OpenShift GlusterFS Cluster
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+dependencies:
+- role: openshift_hosted_facts
+- role: openshift_repos
+- role: lib_openshift
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
new file mode 100644
index 000000000..26ca5eebf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -0,0 +1,107 @@
+---
+- assert:
+    that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+    msg: Only one GlusterFS nodeselector key pair should be provided
+
+- assert:
+    that: "groups.oo_glusterfs_to_config | count >= 3"
+    msg: There must be at least three GlusterFS nodes specified
+
+- name: Delete pre-existing GlusterFS resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "template,daemonset"
+    name: glusterfs
+    state: absent
+  when: openshift_storage_glusterfs_wipe
+
+- name: Unlabel any existing GlusterFS nodes
+  oc_label:
+    name: "{{ item }}"
+    kind: node
+    state: absent
+    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+  with_items: "{{ groups.all }}"
+  when: openshift_storage_glusterfs_wipe
+
+- name: Delete pre-existing GlusterFS config
+  file:
+    path: /var/lib/glusterd
+    state: absent
+  delegate_to: "{{ item }}"
+  with_items: "{{ groups.oo_glusterfs_to_config }}"
+  when: openshift_storage_glusterfs_wipe
+
+- name: Get GlusterFS storage devices state
+  command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
+  register: devices_info
+  delegate_to: "{{ item }}"
+  with_items: "{{ groups.oo_glusterfs_to_config }}"
+  failed_when: False
+  when: openshift_storage_glusterfs_wipe
+
+  # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents
+  shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+  delegate_to: "{{ item.item }}"
+  with_items: "{{ devices_info.results }}"
+  when:
+  - openshift_storage_glusterfs_wipe
+  - item.stdout_lines | count > 0
+
+- name: Add service accounts to privileged SCC
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+  with_items:
+  - 'default'
+  - 'router'
+
+- name: Label GlusterFS nodes
+  oc_label:
+    name: "{{ glusterfs_host }}"
+    kind: node
+    state: add
+    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+  with_items: "{{ groups.oo_glusterfs_to_config }}"
+  loop_control:
+    loop_var: glusterfs_host
+
+- name: Copy GlusterFS DaemonSet template
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml"
+    dest: "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Create GlusterFS template
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: template
+    name: glusterfs
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Deploy GlusterFS pods
+  oc_process:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    template_name: "glusterfs"
+    create: True
+    params:
+      IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
+      IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+
+- name: Wait for GlusterFS pods
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs-node=pod"
+  register: glusterfs_pods
+  until:
+  - "glusterfs_pods.results.results[0]['items'] | count > 0"
+  # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
+  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
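The `until` conditions on "Wait for GlusterFS pods" retry until every expected node reports a pod whose 'Ready' condition is True. A rough Python sketch of what that `oo_collect` filter chain computes, assuming pods shaped like the items returned by `oc_obj`:

```python
def ready_pod_count(pods):
    # Count pods that report a status condition of type 'Ready' with status True.
    # Each pod is assumed to be a dict carrying 'status' -> 'conditions'.
    count = 0
    for pod in pods:
        for cond in pod.get('status', {}).get('conditions', []):
            if cond.get('type') == 'Ready' and str(cond.get('status')) == 'True':
                count += 1
                break
    return count

# The deploy task keeps retrying until this holds:
#   ready_pod_count(pods) == len(groups['oo_glusterfs_to_config'])
```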
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
new file mode 100644
index 000000000..9f092d5d5
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -0,0 +1,48 @@
+---
+- name: Delete pre-existing GlusterFS registry resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "svc,ep"
+    name: "glusterfs-registry-endpoints"
+  failed_when: False
+
+- name: Generate GlusterFS registry endpoints
+  template:
+    src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
+    dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Copy GlusterFS registry service
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+    dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Create GlusterFS registry endpoints
+  oc_obj:
+    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    state: present
+    kind: endpoints
+    name: glusterfs-registry-endpoints
+    files:
+    - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Create GlusterFS registry service
+  oc_obj:
+    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    state: present
+    kind: service
+    name: glusterfs-registry-endpoints
+    files:
+    - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Check if GlusterFS registry volume exists
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+  register: registry_volume
+
+- name: Create GlusterFS registry volume
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+  when: "'{{ openshift.hosted.registry.storage.glusterfs.path }}' not in registry_volume.stdout"
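The last two registry tasks form a check-then-create pair around heketi-cli, so the volume is only created when it is not already listed. A rough Python equivalent of that flow, shown only to make the idempotency pattern explicit (the subcommands mirror the ones in the tasks; the wrapper function itself is hypothetical):

```python
import subprocess

def ensure_registry_volume(heketi_url, admin_key, volume_name, size_gi):
    # Create the registry volume only if heketi does not already list it.
    base = ["heketi-cli", "-s", f"http://{heketi_url}",
            "--user", "admin", "--secret", admin_key]
    listing = subprocess.run(base + ["volume", "list"],
                             capture_output=True, text=True, check=True)
    if volume_name in listing.stdout:
        return  # already present; nothing to do
    subprocess.run(base + ["volume", "create",
                           f"--size={size_gi}", f"--name={volume_name}"],
                   check=True)
```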
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
new file mode 100644
index 000000000..76ae1db75
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -0,0 +1,41 @@
+---
+- name: Copy initial heketi resource files
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+    dest: "{{ mktemp.stdout }}/{{ item }}"
+  with_items:
+  - "deploy-heketi-template.yml"
+
+- name: Create deploy-heketi resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: template
+    name: deploy-heketi
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/deploy-heketi-template.yml"
+
+- name: Deploy deploy-heketi pod
+  oc_process:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    template_name: "deploy-heketi"
+    create: True
+    params:
+      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for deploy-heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+  register: heketi_pod
+  until:
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
new file mode 100644
index 000000000..84b85e95d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -0,0 +1,109 @@
+---
+- name: Create heketi DB volume
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+  register: setup_storage
+  failed_when: False
+
+# This is used in the subsequent task
+- name: Copy the admin client config
+  command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: False
+  check_mode: no
+
+# Need `command` here because heketi-storage.json contains multiple objects.
+- name: Copy heketi DB to GlusterFS volume
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
+  when: "setup_storage.rc == 0"
+
+- name: Wait for copy job to finish
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: job
+    state: list
+    name: "heketi-storage-copy-job"
+  register: heketi_job
+  until:
+  - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
+  # Pod's 'Complete' status must be True
+  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  failed_when:
+  - "'results' in heketi_job.results"
+  - "heketi_job.results.results | count > 0"
+  # Fail when pod's 'Failed' status is True
+  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+  when: "setup_storage.rc == 0"
+
+- name: Delete deploy resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,route,service,jobs,dc,secret"
+    selector: "deploy-heketi"
+  failed_when: False
+
+- name: Copy heketi template
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
+    dest: "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Create heketi resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: template
+    name: heketi
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Deploy heketi pod
+  oc_process:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    template_name: "heketi"
+    create: True
+    params:
+      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  until:
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Determine heketi URL
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    state: list
+    kind: ep
+    selector: "glusterfs=heketi-service"
+  register: heketi_url
+  until:
+  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Set heketi URL
+  set_fact:
+    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+
+- name: Verify heketi service
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+  changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
new file mode 100644
index 000000000..265a3cc6e
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -0,0 +1,182 @@
+---
+- name: Create temp directory for doing work in
+  command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
+  register: mktemp
+  changed_when: False
+  check_mode: no
+
+- name: Verify target namespace exists
+  oc_project:
+    state: present
+    name: "{{ openshift_storage_glusterfs_namespace }}"
+  when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+  when: openshift_storage_glusterfs_is_native
+
+- name: Make sure heketi-client is installed
+  package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,route,service,jobs,dc,secret"
+    selector: "deploy-heketi"
+  - kind: "template,route,dc,service"
+    name: "heketi"
+  - kind: "svc,ep"
+    name: "heketi-storage-endpoints"
+  - kind: "sa"
+    name: "heketi-service-account"
+  failed_when: False
+  when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Create heketi service account
+  oc_serviceaccount:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    name: heketi-service-account
+    state: present
+  when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+  when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+    resource_kind: role
+    resource_name: edit
+    state: present
+  when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+  oc_obj:
namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + register: heketi_pod + when: openshift_storage_glusterfs_heketi_is_native + +- name: Check if need to deploy deploy-heketi + set_fact: + openshift_storage_glusterfs_heketi_deploy_is_missing: False + when: + - "openshift_storage_glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- name: Check for existing heketi pod + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=heketi-pod" + register: heketi_pod + when: openshift_storage_glusterfs_heketi_is_native + +- name: Check if need to deploy heketi + set_fact: + openshift_storage_glusterfs_heketi_is_missing: False + when: + - "openshift_storage_glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- include: heketi_deploy_part1.yml + when: + - openshift_storage_glusterfs_heketi_is_native + - openshift_storage_glusterfs_heketi_deploy_is_missing + - openshift_storage_glusterfs_heketi_is_missing + +- name: Determine heketi URL + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: ep + selector: "glusterfs in (deploy-heketi-service, heketi-service)" + register: heketi_url + until: + - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" + - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" + delay: 10 + retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + when: + - openshift_storage_glusterfs_heketi_is_native + - openshift_storage_glusterfs_heketi_url is undefined + +- name: Set heketi URL + set_fact: + openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + when: + - openshift_storage_glusterfs_heketi_is_native + - openshift_storage_glusterfs_heketi_url is undefined + +- name: Verify heketi service + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" + changed_when: False + +- name: Generate topology file + template: + src: "{{ openshift.common.examples_content_version }}/topology.json.j2" + dest: "{{ mktemp.stdout }}/topology.json" + when: + - openshift_storage_glusterfs_is_native + - openshift_storage_glusterfs_heketi_topology_load + +- name: Load heketi topology + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" + register: topology_load + failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" + when: + - openshift_storage_glusterfs_is_native + - 
+  - openshift_storage_glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+  when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
+
+- include: glusterfs_registry.yml
+  when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+
+- name: Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
+  check_mode: no
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..d72d085c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: glusterfs-registry-endpoints
+subsets:
+- addresses:
+{% for node in groups.oo_glusterfs_to_config %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
new file mode 100644
index 000000000..eb5b4544f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
@@ -0,0 +1,39 @@
+{
+  "clusters": [
+{%- set clusters = {} -%}
+{%- for node in groups.oo_glusterfs_to_config -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+  {%- if cluster in clusters -%}
+    {%- set _dummy = clusters[cluster].append(node) -%}
+  {%- else -%}
+    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+    {
+      "nodes": [
+{%- for node in clusters[cluster] -%}
+        {
+          "node": {
+            "hostnames": {
+              "manage": [
+                "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.common.hostname) }}"
+              ],
+              "storage": [
+                "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+              ]
+            },
+            "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+          },
+          "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+            "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+          ]
+        }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+      ]
+    }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+  ]
+}
diff --git a/test-requirements.txt b/test-requirements.txt
index 805828e1c..585cca0b9 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,11 +1,14 @@
+# Versions are pinned to prevent pypi releases arbitrarily breaking
+# tests with new APIs/semantics. We want to update versions deliberately.
+
 # flake8 must be listed before pylint to avoid dependency conflicts
-flake8
-flake8-mutable
-flake8-print
-pylint
-setuptools-lint
-yamllint
-coverage
-mock
-pytest
-pytest-cov
+flake8==3.3.0
+flake8-mutable==1.1.0
+flake8-print==2.0.2
+pylint==1.6.5
+setuptools-lint==0.5.2
+yamllint==1.6.1
+coverage==4.3.4
+mock==2.0.0
+pytest==3.0.7
+pytest-cov==2.4.0
@@ -11,7 +11,7 @@ skip_install=True
 deps =
     -rrequirements.txt
     -rtest-requirements.txt
-    py35-flake8: flake8-bugbear
+    py35-flake8: flake8-bugbear==17.3.0
 commands =
     unit: pip install -e utils
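One note on the topology.json.j2 template added above: the leading Jinja block groups the GlusterFS hosts into heketi clusters keyed by their optional glusterfs_cluster variable. A small Python sketch of that grouping, assuming hostvars is a plain dict of per-host variables (the example hostnames are hypothetical):

```python
def group_nodes_by_cluster(nodes, hostvars):
    # Hosts that share a glusterfs_cluster value land in the same heketi
    # cluster; hosts that do not set it fall back to cluster '1'.
    clusters = {}
    for node in nodes:
        cluster = hostvars[node].get('glusterfs_cluster', '1')
        clusters.setdefault(cluster, []).append(node)
    return clusters

hostvars = {
    'node1.example.com': {'glusterfs_devices': ['/dev/sdb']},
    'node2.example.com': {'glusterfs_devices': ['/dev/sdb'], 'glusterfs_cluster': '2'},
}
print(group_nodes_by_cluster(list(hostvars), hostvars))
# {'1': ['node1.example.com'], '2': ['node2.example.com']}
```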