120 files changed, 1019 insertions, 459 deletions
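One change worth calling out up front: `playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml` below now performs each package upgrade as a single yum transaction. A minimal sketch of the pattern, with illustrative package names and versions (the real task builds its list from `openshift.common.service_type` and `openshift_pkg_version`):

```yaml
# Sketch: pass one comma-joined list to the package module so yum resolves
# all versions in a single transaction, instead of looping with with_items
# and letting later dependencies float to the newest available version.
- name: Upgrade master packages in one transaction
  package:
    name: "{{ master_pkgs | join(',') }}"
    state: present
  vars:
    master_pkgs:  # illustrative names/version, not taken from a real host
      - origin-3.6.67
      - origin-master-3.6.67
      - origin-node-3.6.67
```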
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 22bf0496f..d9999ac9f 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.6.66-1 ./ +3.6.67-1 ./ diff --git a/docs/pull_requests.md b/docs/pull_requests.md index 953563fb2..fcc3e275c 100644 --- a/docs/pull_requests.md +++ b/docs/pull_requests.md @@ -43,6 +43,15 @@ simplifying the workflow towards a single infrastructure in the future. job is also posted to the Pull Request as comments and summarized at the bottom of the Pull Request page. +### Fedora tests + +There are a set of tests that run on Fedora infrastructure. They are started +automatically with every pull request. + +They are implemented using the [`redhat-ci` framework](https://github.com/jlebon/redhat-ci). + +To re-run tests, write a comment containing `bot, retest this please`. + ## Triggering merge After a PR is properly reviewed and a set of diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.native-glusterfs.example new file mode 100644 index 000000000..2dbb57d40 --- /dev/null +++ b/inventory/byo/hosts.byo.native-glusterfs.example @@ -0,0 +1,51 @@ +# This is an example of a bring your own (byo) host inventory for a cluster +# with natively hosted, containerized GlusterFS storage. +# +# This inventory may be used with the byo/config.yml playbook to deploy a new +# cluster with GlusterFS storage, which will use that storage to create a +# volume that will provide backend storage for a hosted Docker registry. +# +# This inventory may also be used with byo/openshift-glusterfs/config.yml to +# deploy GlusterFS storage on an existing cluster. With this playbook, the +# registry backend volume will be created but the administrator must then +# either deploy a hosted registry or change an existing hosted registry to use +# that volume. +# +# There are additional configuration parameters that can be specified to +# control the deployment and state of a GlusterFS cluster. Please see the +# documentation in playbooks/byo/openshift-glusterfs/README.md and +# roles/openshift_storage_glusterfs/README.md for additional details. + +[OSEv3:children] +masters +nodes +# Specify there will be GlusterFS nodes +glusterfs + +[OSEv3:vars] +ansible_ssh_user=root +deployment_type=origin +# Specify that we want to use GlusterFS storage for a hosted registry +openshift_hosted_registry_storage_kind=glusterfs + +[masters] +master node=True storage=True master=True + +[nodes] +master node=True storage=True master=True openshift_schedulable=False +# A hosted registry, by default, will only be deployed on nodes labeled +# "region=infra". +node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True + +# Specify the glusterfs group, which contains the nodes that will host +# GlusterFS storage pods. At a minimum, each node must have a +# "glusterfs_devices" variable defined. This variable is a list of block +# devices the node will have access to that is intended solely for use as +# GlusterFS storage. These block devices must be bare (e.g. have no data, not +# be marked as LVM PVs), and will be formatted. 
+[glusterfs] +node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index cb878036d..6ec8b9317 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -438,9 +438,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 #openshift_hosted_registry_storage_volume_size=10Gi # -# Native GlusterFS Registry Storage -#openshift_hosted_registry_storage_kind=glusterfs -# # AWS S3 # S3 bucket must already exist. #openshift_hosted_registry_storage_kind=object diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 06635d69c..05945f586 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -438,9 +438,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 #openshift_hosted_registry_storage_volume_size=10Gi # -# Native GlusterFS Registry Storage -#openshift_hosted_registry_storage_kind=glusterfs -# # AWS S3 # # S3 bucket must already exist. diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 4564f2f66..016e86b85 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -9,7 +9,7 @@ %global __requires_exclude ^/usr/bin/ansible-playbook$ Name: openshift-ansible -Version: 3.6.66 +Version: 3.6.67 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -274,6 +274,12 @@ Atomic OpenShift Utilities includes %changelog +* Wed May 10 2017 Scott Dodson <sdodson@redhat.com> 3.6.67-1 +- byo: correct option name (gscrivan@redhat.com) +- Fail if rpm version != docker image version (jchaloup@redhat.com) +- Perform package upgrades in one transaction (sdodson@redhat.com) +- Properly fail if OpenShift RPM version is undefined (rteague@redhat.com) + * Wed May 10 2017 Scott Dodson <sdodson@redhat.com> 3.6.66-1 - Fix issue with Travis-CI using old pip version (rteague@redhat.com) - Remove vim configuration from Python files (rhcarvalho@gmail.com) diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index 268a65415..9d086b7b6 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -15,6 +15,8 @@ g_nfs_hosts: "{{ groups.nfs | default([]) }}" g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}" +g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}" + g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts) | union(g_new_node_hosts)| union(g_new_master_hosts) diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/byo/openshift-glusterfs/README.md new file mode 100644 index 000000000..f62aea229 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/README.md @@ -0,0 +1,98 @@ +# OpenShift GlusterFS Playbooks + +These playbooks are intended to enable the use of GlusterFS volumes by pods in +OpenShift. While they try to provide a sane set of defaults they do cover a +variety of scenarios and configurations, so read carefully. 
:) + +## Playbook: config.yml + +This is the main playbook that integrates GlusterFS into a new or existing +OpenShift cluster. It will also, if specified, configure a hosted Docker +registry with GlusterFS backend storage. + +This playbook requires the `glusterfs` group to exist in the Ansible inventory +file. The hosts in this group are the nodes of the GlusterFS cluster. + + * If this is a newly configured cluster, each host must have a + `glusterfs_devices` variable defined, which must be a list of block + storage devices intended for use only by the GlusterFS cluster. If this is + also an external GlusterFS cluster, you must specify + `openshift_storage_glusterfs_is_native=False`. If the cluster is to be + managed by an external heketi service you must also specify + `openshift_storage_glusterfs_heketi_is_native=False` and + `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi + service. All these variables are specified in `[OSEv3:vars]`. + * If this is an existing cluster, you do not need to specify a list of block + devices but you must specify the following variables in `[OSEv3:vars]`: + * `openshift_storage_glusterfs_is_missing=False` + * `openshift_storage_glusterfs_heketi_is_missing=False` + +By default, pods for a native GlusterFS cluster will be created in the +`default` namespace. To change this, specify +`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`. + +To configure the deployment of a Docker registry with GlusterFS backend +storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in +`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the +registry, specify a `glusterfs_registry` group that is populated, as the +`glusterfs` group is, with the nodes for the separate cluster. If no +`glusterfs_registry` group is specified, the cluster defined by the `glusterfs` +group will be used. + +To swap an existing hosted registry's backend storage for a GlusterFS volume, +specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To +additionally copy any existing contents from an existing hosted registry, +specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`. + +**NOTE:** For each namespace that is to have access to GlusterFS volumes, an +Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding +Service resource must be created. If dynamic provisioning using StorageClasses +is configured, these resources are created automatically in the namespaces that +require them. This playbook also takes care of creating these resources in the +namespaces used for deployment. + +An example of a minimal inventory file: +``` +[OSEv3:children] +masters +nodes +glusterfs + +[OSEv3:vars] +ansible_ssh_user=root +deployment_type=origin + +[masters] +master + +[nodes] +node0 +node1 +node2 + +[glusterfs] +node0 glusterfs_devices='[ "/dev/sdb" ]' +node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]' +node2 glusterfs_devices='[ "/dev/sdd" ]' +``` + +## Playbook: registry.yml + +This playbook is intended for admins who want to deploy a hosted Docker +registry with GlusterFS backend storage on an existing OpenShift cluster. It +has all the same requirements and behaviors as `config.yml`. + +## Role: openshift_storage_glusterfs + +The bulk of the work is done by the `openshift_storage_glusterfs` role.
This +role can handle the deployment of GlusterFS (if it is to be hosted on the +OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone), +and (if specified) integration as backend storage for a hosted Docker registry. + +See the documentation in the role's directory for further details. + +## Role: openshift_hosted + +The `openshift_hosted` role recognizes `glusterfs` as a possible storage +backend for a hosted docker registry. It will also, if configured, handle the +swap of an existing registry's backend storage to a GlusterFS volume. diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml new file mode 100644 index 000000000..3f11f3991 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/config.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/config.yml diff --git a/playbooks/byo/openshift-glusterfs/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-glusterfs/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..6ee6febdb --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/registry.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/registry.yml diff --git a/playbooks/byo/openshift-glusterfs/roles b/playbooks/byo/openshift-glusterfs/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml index 04a55308a..eb763221f 100644 --- a/playbooks/byo/openshift-preflight/check.yml +++ b/playbooks/byo/openshift-preflight/check.yml @@ -1,8 +1,9 @@ --- - include: ../openshift-cluster/initialize_groups.yml -- hosts: g_all_hosts - name: run OpenShift health checks +- name: Run OpenShift health checks + # Temporarily reverting to OSEv3 until group standardization is complete + hosts: OSEv3 roles: - openshift_health_checker post_tasks: diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index 75b606e61..a8c1c3a88 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -8,7 +8,8 @@ - always - name: Gather Cluster facts - hosts: g_all_hosts + # Temporarily reverting to OSEv3 until group standardization is complete + hosts: OSEv3 roles: - openshift_facts tasks: diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index aec87cf82..1b14ff32e 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -4,7 +4,8 @@ - always - name: Subscribe hosts, update repos and update OS packages - hosts: g_all_hosts + # Temporarily reverting to OSEv3 until group standardization is complete + hosts: OSEv3 roles: - role: rhel_subscribe when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 239bb211b..c320b80ed 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -3,9 +3,15 @@ tags: - always -- include: disable_excluder.yml +- name: Disable excluders + hosts: oo_masters_to_config:oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: disable - include: ../openshift-etcd/config.yml tags: @@ -39,6 +45,12 @@ tags: - hosted -- include: reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config:oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml deleted file mode 100644 index f664c51c9..000000000 --- a/playbooks/common/openshift-cluster/disable_excluder.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: Disable excluders - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - # During installation the excluders are installed with present state. 
- # So no pre-validation check here as the excluders are either to be installed (present = latest) - # or they are not going to be updated if already installed - - # disable excluders based on their status - - include_role: - name: openshift_excluder - tasks_from: disable - vars: - openshift_excluder_package_state: present - docker_excluder_package_state: present diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 17a177644..46932b27f 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -155,5 +155,5 @@ groups: oo_glusterfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_glusterfs_hosts | default([]) }}" + with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}" changed_when: no diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml deleted file mode 100644 index eaa8ce39c..000000000 --- a/playbooks/common/openshift-cluster/reset_excluder.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config:oo_nodes_to_config - gather_facts: no - tasks: - - include_role: - name: openshift_excluder - tasks_from: enable diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml index 02042c1ef..35da3b6c3 100644 --- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml +++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml @@ -1,14 +1,11 @@ --- -- name: Record excluder state and disable - hosts: oo_masters_to_config:oo_nodes_to_config +- name: Disable excluders + hosts: oo_masters_to_config gather_facts: no tasks: - # verify the excluders can be upgraded - include_role: name: openshift_excluder tasks_from: verify_upgrade - - # disable excluders based on their status - include_role: name: openshift_excluder tasks_from: disable diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml new file mode 100644 index 000000000..847c22085 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml @@ -0,0 +1,14 @@ +--- +- name: Disable excluders + hosts: oo_nodes_to_config + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: verify_upgrade + - include_role: + name: openshift_excluder + tasks_from: disable + vars: + openshift_excluder_package_state: latest + docker_excluder_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh deleted file mode 120000 index 641e04e44..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh +++ /dev/null @@ -1 +0,0 @@ -../roles/etcd/files/etcdctl.sh
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 0d7cdb227..fff199f42 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -97,6 +97,12 @@ - not grep_plugin_order_override | skipped - grep_plugin_order_override.rc == 0 -- include: ../reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index 03ac02e9f..164baca81 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -1,27 +1,39 @@ --- -# We verified latest rpm available is suitable, so just yum update. +# When we update package "a-${version}" and a requires b >= ${version}, if we +# don't specify the version of b, yum will choose the latest version of b +# available and the whole set of dependencies ends up at the latest version. +# Since the package module, unlike the yum module, doesn't flatten a list +# of packages into one transaction, we need to do that explicitly. The Ansible +# core team tells us not to rely on yum module transaction flattening anyway. + +# TODO: If the sdn package isn't already installed this will install it; we +# should fix that. -# Master package upgrade ends up depending on node and sdn packages, we need to be explicit -# with all versions to avoid yum from accidentally jumping to something newer than intended: - name: Upgrade master packages - package: name={{ item }} state=present - when: component == "master" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + package: name={{ master_pkgs | join(',') }} state=present + vars: + master_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "master" + - not openshift.common.is_atomic | bool - name: Upgrade node packages - package: name={{ item }} state=present - when: component == "node" - with_items: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - -- name: Ensure python-yaml present for config upgrade - package: name=PyYAML state=present - when: not openshift.common.is_atomic | bool + package: name={{ node_pkgs | join(',') }} state=present
+ vars: + node_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "node" + - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index e9f894942..2a5ac0aef 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -47,6 +47,12 @@ until: node_schedulable|succeeded when: node_unschedulable|changed -- include: ../reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml index be18c1edd..d81a13ef2 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -46,7 +46,11 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index 20dffb44b..8a692d02b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -54,7 +54,7 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_master_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index 14aaf70d6..2d30bba94 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -47,7 +47,7 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_node_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml index 5d6455bef..e9ff47f32 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -46,7 +46,11 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index c76920586..d4ae8d8b4 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -54,7 +54,7 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: 
../disable_master_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml index f397f6015..ae205b172 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -47,7 +47,7 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_node_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml index 7cedfb1ca..1269634d1 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -46,12 +46,14 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml tags: - pre_upgrade -# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. -# So it is necessary to run the play after running disable_excluder.yml. - include: ../../initialize_openshift_version.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 0198074ed..21c075678 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -54,7 +54,7 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_master_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml index 2b16875f4..e67e169fc 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -47,7 +47,7 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_node_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 4604bdc8b..a1b1f3301 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -46,12 +46,14 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml tags: - pre_upgrade -# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. -# So it is necassary to run the play after running disable_excluder.yml. 
- include: ../../initialize_openshift_version.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index a09097ed9..af6e1f71b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -54,7 +54,7 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_master_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 7640f2116..285c18b7b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -47,7 +47,7 @@ tags: - pre_upgrade -- include: ../disable_excluder.yml +- include: ../disable_node_excluders.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 75faf5ba8..1efdfb336 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -12,7 +12,9 @@ - service: glusterfs_bricks port: "49152-49251/tcp" roles: - - os_firewall + - role: os_firewall + when: + - openshift_storage_glusterfs_is_native | default(True) - name: Configure GlusterFS hosts: oo_first_master diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..80cf7529e --- /dev/null +++ b/playbooks/common/openshift-glusterfs/registry.yml @@ -0,0 +1,49 @@ +--- +- include: config.yml + +- name: Initialize GlusterFS registry PV and PVC vars + hosts: oo_first_master + tags: hosted + tasks: + - set_fact: + glusterfs_pv: [] + glusterfs_pvc: [] + + - set_fact: + glusterfs_pv: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + storage: + glusterfs: + endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" + path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" + readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" + glusterfs_pvc: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + when: openshift.hosted.registry.storage.glusterfs.swap + +- name: Create persistent volumes + hosts: oo_first_master + tags: + - hosted + vars: + persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" + persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" + roles: + - role: openshift_persistent_volumes + when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + +- name: Create Hosted Resources + hosts: oo_first_master + tags: + - hosted + pre_tasks: + - set_fact: + openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" + openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url 
}}" + when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" + roles: + - role: openshift_hosted diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index ab0045a39..49594d294 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -60,9 +60,15 @@ - openshift_facts - openshift_docker -- include: ../openshift-cluster/disable_excluder.yml +- name: Disable excluders + hosts: oo_masters_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: disable - include: ../openshift-master/config.yml @@ -70,6 +76,12 @@ - include: ../openshift-node/config.yml -- include: ../openshift-cluster/reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml index c31aca62b..d94df553c 100644 --- a/playbooks/common/openshift-node/scaleup.yml +++ b/playbooks/common/openshift-node/scaleup.yml @@ -27,12 +27,24 @@ - openshift_facts - openshift_docker -- include: ../openshift-cluster/disable_excluder.yml +- name: Disable excluders + hosts: oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: disable - include: ../openshift-node/config.yml -- include: ../openshift-cluster/reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_nodes_to_config tags: - always + gather_facts: no + tasks: + - include_role: + name: openshift_excluder + tasks_from: enable diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2 index b882a5597..7a1236392 100644 --- a/roles/calico/templates/calico.service.j2 +++ b/roles/calico/templates/calico.service.j2 @@ -1,7 +1,7 @@ [Unit] Description=calico -After=docker.service -Requires=docker.service +After={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service [Service] Restart=always diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service index 8e4b66fbe..4506d2231 100644 --- a/roles/contiv/templates/aci-gw.service +++ b/roles/contiv/templates/aci-gw.service @@ -1,6 +1,6 @@ [Unit] Description=Contiv ACI gw -After=auditd.service systemd-user-sessions.service time-sync.target docker.service +After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift.docker.service_name }}.service [Service] ExecStart={{ bin_dir }}/aci_gw.sh start diff --git a/roles/dns/templates/named.service.j2 b/roles/dns/templates/named.service.j2 index 566739f25..6e0a7a640 100644 --- a/roles/dns/templates/named.service.j2 +++ b/roles/dns/templates/named.service.j2 @@ -1,7 +1,7 @@ [Unit] -Requires=docker.service -After=docker.service -PartOf=docker.service +Requires={{ openshift.docker.service_name }}.service +After={{ openshift.docker.service_name }}.service +PartOf={{ openshift.docker.service_name }}.service [Service] Type=simple @@ -12,4 +12,4 @@ ExecStart=/usr/bin/docker run --name bind -p 53:53/udp -v /var/log:/var/log -v / ExecStop=/usr/bin/docker stop bind [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/docker/README.md 
b/roles/docker/README.md index f25ca03cd..4a9f21f22 100644 --- a/roles/docker/README.md +++ b/roles/docker/README.md @@ -3,6 +3,8 @@ Docker Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes. +daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + Requirements ------------ diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index 10fb5772c..e101730d2 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -46,7 +46,7 @@ template: dest: "{{ docker_systemd_dir }}/custom.conf" src: custom.conf.j2 - when: not os_firewall_use_firewalld | default(True) | bool + when: not os_firewall_use_firewalld | default(False) | bool - stat: path=/etc/sysconfig/docker register: docker_check diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 722232a9b..a461c479a 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -121,6 +121,7 @@ l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}" l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}" l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}" + l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}" # Configure container-engine using the daemon.json file - name: Configure Container Engine diff --git a/roles/docker/templates/daemon.json b/roles/docker/templates/daemon.json index 7ea8164b3..c607e6afe 100644 --- a/roles/docker/templates/daemon.json +++ b/roles/docker/templates/daemon.json @@ -1,66 +1,21 @@ - { - "api-cors-header": "", "authorization-plugins": ["rhel-push-plugin"], - "bip": "", - "bridge": "", - "cgroup-parent": "", - "cluster-store": "", - "cluster-store-opts": {}, - "cluster-advertise": "", - "debug": true, - "default-gateway": "", - "default-gateway-v6": "", "default-runtime": "oci", "containerd": "/run/containerd.sock", - "default-ulimits": {}, "disable-legacy-registry": false, - "dns": [], - "dns-opts": [], - "dns-search": [], "exec-opts": ["native.cgroupdriver=systemd"], - "exec-root": "", - "fixed-cidr": "", - "fixed-cidr-v6": "", - "graph": "", - "group": "", - "hosts": [], - "icc": false, "insecure-registries": {{ l_docker_insecure_registries }}, - "ip": "0.0.0.0", - "iptables": false, - "ipv6": false, - "ip-forward": false, - "ip-masq": false, - "labels": [], - "live-restore": true, {% if docker_log_driver is defined %} "log-driver": "{{ docker_log_driver }}", {%- endif %} - "log-level": "", "log-opts": {{ l_docker_log_options }}, - "max-concurrent-downloads": 3, - "max-concurrent-uploads": 5, - "mtu": 0, - "oom-score-adjust": -500, - "pidfile": "", - "raw-logs": false, - "registry-mirrors": [], "runtimes": { "oci": { "path": "/usr/libexec/docker/docker-runc-current" } }, - "selinux-enabled": {{ docker_selinux_enabled|default(true) }}, - "storage-driver": "", - "storage-opts": [], - "tls": true, - "tlscacert": "", - "tlscert": "", - "tlskey": "", - "tlsverify": true, - "userns-remap": "", + "selinux-enabled": {{ l_docker_selinux_enabled | lower }}, "add-registry": {{ l_docker_additional_registries }}, - "blocked-registries": {{ l_docker_blocked_registries }}, + "block-registry": {{ l_docker_blocked_registries }}, "userland-proxy-path": "/usr/libexec/docker/docker-proxy-current" } 
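For reference, a sketch of what the trimmed `daemon.json` template above renders to, assuming `docker_log_driver=json-file`, empty registry lists, and the default `docker_selinux_enabled=true` (illustrative values, not captured output):

```json
{
  "authorization-plugins": ["rhel-push-plugin"],
  "default-runtime": "oci",
  "containerd": "/run/containerd.sock",
  "disable-legacy-registry": false,
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries": [],
  "log-driver": "json-file",
  "log-opts": {},
  "runtimes": { "oci": { "path": "/usr/libexec/docker/docker-runc-current" } },
  "selinux-enabled": true,
  "add-registry": [],
  "block-registry": [],
  "userland-proxy-path": "/usr/libexec/docker/docker-proxy-current"
}
```

Note that `selinux-enabled` is now rendered from `l_docker_selinux_enabled | lower`, so the templated value is valid JSON (`true`/`false` rather than Python's `True`/`False`).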
diff --git a/roles/docker/templates/systemcontainercustom.conf.j2 b/roles/docker/templates/systemcontainercustom.conf.j2 index a4fb01d2b..1faad506a 100644 --- a/roles/docker/templates/systemcontainercustom.conf.j2 +++ b/roles/docker/templates/systemcontainercustom.conf.j2 @@ -10,7 +10,7 @@ ENVIRONMENT=HTTPS_PROXY={{ docker_http_proxy }} {%- if "no_proxy" in openshift.common %} ENVIRONMENT=NO_PROXY={{ docker_no_proxy }} {%- endif %} -{%- if os_firewall_use_firewalld|default(true) %} +{%- if os_firewall_use_firewalld|default(false) %} [Unit] Wants=iptables.service After=iptables.service diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index c8ceaa6ba..adeca7a91 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -1,8 +1,8 @@ [Unit] Description=The Etcd Server container -After=docker.service -Requires=docker.service -PartOf=docker.service +After={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service +PartOf={{ openshift.docker.service_name }}.service [Service] EnvironmentFile={{ etcd_conf_file }} @@ -14,4 +14,4 @@ Restart=always RestartSec=5s [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md index 131a01490..d1c3a6602 100644 --- a/roles/etcd_common/README.md +++ b/roles/etcd_common/README.md @@ -1,17 +1,21 @@ etcd_common ======================== -TODO +Common resources for dependent etcd roles, e.g. default variables for: +* config directories +* certificates +* ports +* other settings -Requirements ------------- - -TODO +Or the `delegated_serial_command` Ansible module for executing a command on a remote node, e.g.: -Role Variables --------------- +```yaml +- delegated_serial_command: + command: /usr/bin/make_database.sh arg1 arg2 + creates: /path/to/database +``` -TODO +Or the etcdctl.yml playbook for installing `etcdctl` aliases on a node (see example). Dependencies ------------ @@ -21,7 +25,22 @@ openshift-repos Example Playbook ---------------- -TODO +**Drop etcdctl aliases** + +```yaml +- include_role: + name: etcd_common + tasks_from: etcdctl +``` + +**Get access to common variables** + +```yaml +# meta.yml of etcd +...
+dependencies: +- { role: etcd_common } +``` License ------- diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 7039a0cec..03d3e17c4 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -166,7 +166,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index ae5806137..7493b5c3d 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -152,7 +152,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 36eb294a8..5e72f5954 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -138,7 +138,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index bedd45922..371a3953b 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -138,7 +138,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index c6fa85f90..7240521c6 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -256,7 +256,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 8a4f93372..a54c62cd4 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -281,7 +281,7 @@ class YeditException(Exception): 
# pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index d81c29784..78c72ef26 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -130,7 +130,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index bdcb3f278..c88f56fc6 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -136,7 +136,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index be1b3a01e..17e3f7dde 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -180,7 +180,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index 4ac6e4aeb..18ab97bc0 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -147,7 +147,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index b6f058340..88c6ef209 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -120,7 +120,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index c094c9472..45860cbe5 100644 --- a/roles/lib_openshift/library/oc_image.py +++ 
b/roles/lib_openshift/library/oc_image.py @@ -139,7 +139,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index a76dd44c4..65923a698 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -156,7 +156,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index e12137b51..1d75a21b9 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -159,7 +159,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments @@ -1548,7 +1548,7 @@ class OCObject(OpenShiftCLI): if state == 'absent': # verify its not in our results if (params['name'] is not None or params['selector'] is not None) and \ - (len(api_rval['results']) == 0 or len(api_rval['results'][0].getattr('items', [])) == 0): + (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0): return {'changed': False, 'state': state} if check_mode: diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index aeb4e5686..d9ce5679b 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -91,7 +91,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index f7aa8c0d2..8e1ffe90f 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -148,7 +148,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index b044a47ce..a06852fd8 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -145,7 +145,7 @@ class YeditException(Exception): # 
pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 8604cc2f3..79673452d 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -140,7 +140,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index fef48daf0..ad705a6c5 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -190,7 +190,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index 384df0ee3..291ac8b19 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -134,7 +134,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 443750c5d..df28df2bc 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -180,7 +180,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 7537bdb5b..e98f83cc3 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -186,7 +186,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 03a4dd3b9..f00e9e4f6 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ 
b/roles/lib_openshift/library/oc_serviceaccount.py @@ -132,7 +132,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index db1010694..6691495a6 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -132,7 +132,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index c3885c1ac..72f2fbf03 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -192,7 +192,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 5c4596c09..bc3340a94 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -104,7 +104,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 5a507348c..9dec0a6d4 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -80,6 +80,18 @@ options: required: false default: False aliases: [] + name: + description: + - Name of the object that is being queried. + required: false + default: None + aliases: [] + vol_name: + description: + - Name of the volume that is being queried. 
+ required: false + default: None + aliases: [] namespace: description: - The name of the namespace where the object lives @@ -169,7 +181,7 @@ class YeditException(Exception): # pragma: no cover class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py index 89ee2f5a0..6f0da3d5c 100644 --- a/roles/lib_openshift/src/class/oc_obj.py +++ b/roles/lib_openshift/src/class/oc_obj.py @@ -117,7 +117,7 @@ class OCObject(OpenShiftCLI): if state == 'absent': # verify its not in our results if (params['name'] is not None or params['selector'] is not None) and \ - (len(api_rval['results']) == 0 or len(api_rval['results'][0].getattr('items', [])) == 0): + (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0): return {'changed': False, 'state': state} if check_mode: diff --git a/roles/lib_openshift/src/doc/volume b/roles/lib_openshift/src/doc/volume index 1d04afeef..43ff78c9f 100644 --- a/roles/lib_openshift/src/doc/volume +++ b/roles/lib_openshift/src/doc/volume @@ -29,6 +29,18 @@ options: required: false default: False aliases: [] + name: + description: + - Name of the object that is being queried. + required: false + default: None + aliases: [] + vol_name: + description: + - Name of the volume that is being queried. + required: false + default: None + aliases: [] namespace: description: - The name of the namespace where the object lives diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py index 284a54066..baf72fe47 100644 --- a/roles/lib_utils/library/yedit.py +++ b/roles/lib_utils/library/yedit.py @@ -213,7 +213,7 @@ class YeditException(Exception): class Yedit(object): ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py index e0a27012f..957c35a06 100644 --- a/roles/lib_utils/src/class/yedit.py +++ b/roles/lib_utils/src/class/yedit.py @@ -11,7 +11,7 @@ class YeditException(Exception): class Yedit(object): ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" - re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 911d72412..914e46c05 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -2167,7 +2167,9 @@ class OpenShiftFacts(object): glusterfs=dict( endpoints='glusterfs-registry-endpoints', path='glusterfs-registry-volume', - readOnly=False), + readOnly=False, + swap=False, + swapcopy=True), host=None, access=dict( modes=['ReadWriteMany'] diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md index 6d576df71..3e5d7f860 100644 --- a/roles/openshift_hosted/README.md +++ b/roles/openshift_hosted/README.md @@ -28,6 +28,14 @@ From this role: | 
openshift_hosted_registry_selector | region=infra | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. | | openshift_hosted_registry_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. | +If you specify `openshift_hosted_registry_storage_kind=glusterfs`, the following +variables also control configuration behavior: + +| Name | Default value | Description | +|----------------------------------------------|---------------|------------------------------------------------------------------------------| +| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume | +| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, also copy the current contents of the registry volume | + Dependencies ------------ diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 6e691c26f..751489958 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -61,7 +61,7 @@ name: "{{ openshift_hosted_registry_serviceaccount }}" namespace: "{{ openshift_hosted_registry_namespace }}" -- name: Grant the registry serivce account access to the appropriate scc +- name: Grant the registry service account access to the appropriate scc oc_adm_policy_user: user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}" namespace: "{{ openshift_hosted_registry_namespace }}" @@ -126,4 +126,4 @@ - include: storage/glusterfs.yml when: - - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' + - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml index b18b24266..e6bb196b8 100644 --- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml @@ -1,10 +1,18 @@ --- +- name: Get registry DeploymentConfig + oc_obj: + namespace: "{{ openshift_hosted_registry_namespace }}" + state: list + kind: dc + name: "{{ openshift_hosted_registry_name }}" + register: registry_dc + - name: Wait for registry pods oc_obj: namespace: "{{ openshift_hosted_registry_namespace }}" state: list kind: pod - selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}" + selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}" register: registry_pods until: - "registry_pods.results.results[0]['items'] | count > 0" @@ -38,6 +46,39 @@ mode: "2775" recurse: True +- block: + - name: Activate registry maintenance mode + oc_env: + namespace: "{{ openshift_hosted_registry_namespace }}" + name: "{{ openshift_hosted_registry_name }}" + env_vars: + - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true' + + - name: Get first registry pod name + set_fact: + registry_pod_name: "{{ registry_pods.results.results[0]['items'][0].metadata.name }}" + + - name: Copy current registry contents to new GlusterFS volume + command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/" + when: openshift.hosted.registry.storage.glusterfs.swapcopy + + - 
name: Swap new GlusterFS registry volume + oc_volume: + namespace: "{{ openshift_hosted_registry_namespace }}" + name: "{{ openshift_hosted_registry_name }}" + vol_name: registry-storage + mount_type: pvc + claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + + - name: Deactivate registry maintenance mode + oc_env: + namespace: "{{ openshift_hosted_registry_namespace }}" + name: "{{ openshift_hosted_registry_name }}" + state: absent + env_vars: + - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true' + when: openshift.hosted.registry.storage.glusterfs.swap + - name: Unmount registry volume mount: state: unmounted diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml index 28feac4e6..8fe02444e 100644 --- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml @@ -103,9 +103,9 @@ parameters: - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX value: "registry.access.redhat.com/openshift3/" - - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.5", set version "3.5"' + - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.6", set version "3.6"' name: IMAGE_VERSION - value: "3.5" + value: "3.6" - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443" name: OPENSHIFT_OAUTH_PROVIDER_URL required: true diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 index 5385df3b7..72182fcdd 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 @@ -1,7 +1,7 @@ [Unit] -After=docker.service -Requires=docker.service -PartOf=docker.service +After={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service +PartOf={{ openshift.docker.service_name }}.service [Service] ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer @@ -14,4 +14,4 @@ Restart=always RestartSec=5s [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index c05cc5f98..76dfe518e 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -72,7 +72,7 @@ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nod openshift_logging_fluentd_cpu_limit: 100m openshift_logging_fluentd_memory_limit: 512Mi openshift_logging_fluentd_es_copy: false -openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}" +openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal if openshift_hosted_logging_use_journal is defined else (docker_log_driver == 'journald') | ternary(True, False) if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') | ternary(True, False) if openshift.docker.log_driver is defined else openshift.docker.options | search('--log-driver=journald') if openshift.docker.options 
is defined else omit }}" openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" openshift_logging_fluentd_hosts: ['--all'] diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2 index d13691259..5c93d823e 100644 --- a/roles/openshift_logging/templates/fluentd.j2 +++ b/roles/openshift_logging/templates/fluentd.j2 @@ -59,6 +59,9 @@ spec: - name: dockercfg mountPath: /etc/sysconfig/docker readOnly: true + - name: dockerdaemoncfg + mountPath: /etc/docker + readOnly: true {% if openshift_logging_use_mux_client | bool %} - name: muxcerts mountPath: /etc/fluent/muxkeys @@ -154,6 +157,9 @@ spec: - name: dockercfg hostPath: path: /etc/sysconfig/docker + - name: dockerdaemoncfg + hostPath: + path: /etc/docker {% if openshift_logging_use_mux_client | bool %} - name: muxcerts secret: diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 58fabddeb..cfa860edf 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -32,6 +32,15 @@ - not openshift.common.is_master_system_container | bool register: create_master_unit_file +- name: Install Master service file + template: + dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service" + src: "{{ openshift.common.service_type }}-master.service" + register: create_master_unit_file + when: + - not openshift.common.is_containerized | bool + - (openshift.master.ha is not defined or not openshift.master.ha) | bool + - command: systemctl daemon-reload when: create_master_unit_file | changed diff --git a/roles/openshift_master/templates/atomic-openshift-master.service b/roles/openshift_master/templates/atomic-openshift-master.service new file mode 100644 index 000000000..02af4dd16 --- /dev/null +++ b/roles/openshift_master/templates/atomic-openshift-master.service @@ -0,0 +1,23 @@ +[Unit] +Description=Atomic OpenShift Master +Documentation=https://github.com/openshift/origin +After=network-online.target +After=etcd.service +Before=atomic-openshift-node.service +Requires=network-online.target + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/atomic-openshift-master +Environment=GOTRACEBACK=crash +ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS +LimitNOFILE=131072 +LimitCORE=infinity +WorkingDirectory=/var/lib/origin/ +SyslogIdentifier=atomic-openshift-master +Restart=always +RestartSec=5s + +[Install] +WantedBy=multi-user.target +WantedBy=atomic-openshift-node.service diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index 155abd970..897ee7285 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -4,9 +4,9 @@ Documentation=https://github.com/openshift/origin After=etcd_container.service Wants=etcd_container.service Before={{ openshift.common.service_type }}-node.service -After=docker.service -PartOf=docker.service -Requires=docker.service +After={{ openshift.docker.service_name }}.service +PartOf={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service 
[Service] EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api @@ -23,5 +23,5 @@ Restart=always RestartSec=5s [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 088e8db43..451f3436a 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -3,9 +3,9 @@ Description=Atomic OpenShift Master Controllers Documentation=https://github.com/openshift/origin Wants={{ openshift.common.service_type }}-master-api.service After={{ openshift.common.service_type }}-master-api.service -After=docker.service -Requires=docker.service -PartOf=docker.service +After={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service +PartOf={{ openshift.docker.service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers @@ -22,4 +22,4 @@ Restart=always RestartSec=5s [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2 index 13381cd1a..7f40cb042 100644 --- a/roles/openshift_master/templates/master_docker/master.docker.service.j2 +++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2 @@ -1,7 +1,7 @@ [Unit] -After=docker.service -Requires=docker.service -PartOf=docker.service +After={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service +PartOf={{ openshift.docker.service_name }}.service After=etcd_container.service Wants=etcd_container.service @@ -15,4 +15,4 @@ Restart=always RestartSec=5s [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/openshift_master/templates/origin-master.service b/roles/openshift_master/templates/origin-master.service new file mode 100644 index 000000000..cf79dda02 --- /dev/null +++ b/roles/openshift_master/templates/origin-master.service @@ -0,0 +1,23 @@ +[Unit] +Description=Origin Master Service +Documentation=https://github.com/openshift/origin +After=network-online.target +After=etcd.service +Before=origin-node.service +Requires=network-online.target + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/origin-master +Environment=GOTRACEBACK=crash +ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS +LimitNOFILE=131072 +LimitCORE=infinity +WorkingDirectory=/var/lib/origin/ +SyslogIdentifier=origin-master +Restart=always +RestartSec=5s + +[Install] +WantedBy=multi-user.target +WantedBy=origin-node.service diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md index f4c61a75e..84503217b 100644 --- a/roles/openshift_metrics/README.md +++ b/roles/openshift_metrics/README.md @@ -76,7 +76,7 @@ openshift_metrics_<COMPONENT>_(limits|requests)_(memory|cpu): <VALUE> ``` e.g ``` -openshift_metrics_cassandra_limits_memory: 1G +openshift_metrics_cassandra_limits_memory: 1Gi openshift_metrics_hawkular_requests_cpu: 100 ``` diff --git 
a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 52482d09b..a0fbf7dfc 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -25,6 +25,13 @@ - openshift.common.is_containerized | bool - not openshift.common.is_node_system_container | bool +- name: Install Node service file + template: + dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" + src: "{{ openshift.common.service_type }}-node.service" + register: install_node_result + when: not openshift.common.is_containerized | bool + - name: Create the openvswitch service env file template: src: openvswitch.sysconfig.j2 @@ -115,6 +122,5 @@ - name: Reload systemd units command: systemctl daemon-reload - when: (openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)) or install_oom_fix_result | changed notify: - restart node diff --git a/roles/openshift_node/templates/atomic-openshift-node.service b/roles/openshift_node/templates/atomic-openshift-node.service new file mode 100644 index 000000000..80232094a --- /dev/null +++ b/roles/openshift_node/templates/atomic-openshift-node.service @@ -0,0 +1,22 @@ +[Unit] +Description=Atomic OpenShift Node +After={{ openshift.docker.service_name }}.service +After=openvswitch.service +Wants={{ openshift.docker.service_name }}.service +Documentation=https://github.com/openshift/origin + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/atomic-openshift-node +Environment=GOTRACEBACK=crash +ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS +LimitNOFILE=65536 +LimitCORE=infinity +WorkingDirectory=/var/lib/origin/ +SyslogIdentifier=atomic-openshift-node +Restart=always +RestartSec=5s +OOMScoreAdjust=-999 + +[Install] +WantedBy=multi-user.target diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index 0fb34cffd..4c47f8c0d 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -1,6 +1,6 @@ [Unit] -Requires=docker.service -After=docker.service +Requires={{ openshift.docker.service_name }}.service +After={{ openshift.docker.service_name }}.service PartOf={{ openshift.common.service_type }}-node.service Before={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index c42bdb7c3..06782cb8b 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -1,9 +1,9 @@ [Unit] After={{ openshift.common.service_type }}-master.service -After=docker.service +After={{ openshift.docker.service_name }}.service After=openvswitch.service -PartOf=docker.service -Requires=docker.service +PartOf={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service {% if openshift.common.use_openshift_sdn %} Requires=openvswitch.service After=ovsdb-server.service @@ -25,4 +25,4 @@ Restart=always RestartSec=5s [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service index 1e1f8967d..34aaaabd6 100644 --- 
a/roles/openshift_node/templates/openvswitch.docker.service +++ b/roles/openshift_node/templates/openvswitch.docker.service @@ -1,7 +1,7 @@ [Unit] -After=docker.service -Requires=docker.service -PartOf=docker.service +After={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service +PartOf={{ openshift.docker.service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/openvswitch @@ -14,4 +14,4 @@ Restart=always RestartSec=5s [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/openshift_node/templates/origin-node.service b/roles/openshift_node/templates/origin-node.service new file mode 100644 index 000000000..8047301e6 --- /dev/null +++ b/roles/openshift_node/templates/origin-node.service @@ -0,0 +1,21 @@ +[Unit] +Description=Origin Node +After={{ openshift.docker.service_name }}.service +Wants={{ openshift.docker.service_name }}.service +Documentation=https://github.com/openshift/origin + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/origin-node +Environment=GOTRACEBACK=crash +ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS +LimitNOFILE=65536 +LimitCORE=infinity +WorkingDirectory=/var/lib/origin/ +SyslogIdentifier=origin-node +Restart=always +RestartSec=5s +OOMScoreAdjust=-999 + +[Install] +WantedBy=multi-user.target diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service index 0fb34cffd..4c47f8c0d 100644 --- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service @@ -1,6 +1,6 @@ [Unit] -Requires=docker.service -After=docker.service +Requires={{ openshift.docker.service_name }}.service +After={{ openshift.docker.service_name }}.service PartOf={{ openshift.common.service_type }}-node.service Before={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service index 0ff398152..a9b393652 100644 --- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service @@ -1,9 +1,9 @@ [Unit] After={{ openshift.common.service_type }}-master.service -After=docker.service +After={{ openshift.docker.service_name }}.service After=openvswitch.service -PartOf=docker.service -Requires=docker.service +PartOf={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service {% if openshift.common.use_openshift_sdn %} Requires=openvswitch.service {% endif %} @@ -23,4 +23,4 @@ Restart=always RestartSec=5s [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/openshift_node_upgrade/templates/openvswitch.docker.service b/roles/openshift_node_upgrade/templates/openvswitch.docker.service index 1e1f8967d..34aaaabd6 100644 --- a/roles/openshift_node_upgrade/templates/openvswitch.docker.service +++ b/roles/openshift_node_upgrade/templates/openvswitch.docker.service @@ -1,7 +1,7 @@ [Unit] -After=docker.service -Requires=docker.service -PartOf=docker.service +After={{ openshift.docker.service_name }}.service +Requires={{ openshift.docker.service_name }}.service +PartOf={{ openshift.docker.service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/openvswitch @@ -14,4 +14,4 @@ 
Restart=always RestartSec=5s [Install] -WantedBy=docker.service +WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index cf0fb94c9..7b310dbf8 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -8,10 +8,24 @@ Requirements * Ansible 2.2 +Host Groups +----------- + +The following group is expected to be populated for this role to run: + +* `[glusterfs]` + +Additionally, the following group may be specified either in addition to or +instead of the above group to deploy a GlusterFS cluster for use by a natively +hosted Docker registry: + +* `[glusterfs_registry]` + Role Variables -------------- -From this role: +This role has the following variables that control the integration of a +GlusterFS cluster into a new or existing OpenShift cluster: | Name | Default value | | |--------------------------------------------------|-------------------------|-----------------------------------------| @@ -31,6 +45,25 @@ From this role: | openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode | openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` +Each role variable also has a corresponding variable to optionally configure a +separate GlusterFS cluster for use as storage for an integrated Docker +registry. These variables start with the prefix +`openshift_storage_glusterfs_registry_` and, for the most part, default to the +values in their corresponding non-registry variables. The following variables +are an exception: + +| Name | Default value | | +|---------------------------------------------------|-----------------------|-----------------------------------------| +| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default' +| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters + +Additionally, this role's behavior responds to the following registry-specific +variable: + +| Name | Default value | Description | +|----------------------------------------------|---------------|------------------------------------------------------------------------------| +| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume | + Dependencies ------------ @@ -47,6 +80,7 @@ Example Playbook hosts: oo_first_master roles: - role: openshift_storage_glusterfs + when: groups.oo_glusterfs_to_config | default([]) | count > 0 ``` License diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index ade850747..ebe9ca30b 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -2,7 +2,7 @@ openshift_storage_glusterfs_timeout: 300 openshift_storage_glusterfs_namespace: 'default' openshift_storage_glusterfs_is_native: True -openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}" +openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs' openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' 
| quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' openshift_storage_glusterfs_wipe: False @@ -15,3 +15,22 @@ openshift_storage_glusterfs_heketi_admin_key: '' openshift_storage_glusterfs_heketi_user_key: '' openshift_storage_glusterfs_heketi_topology_load: True openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}" +openshift_storage_glusterfs_heketi_url: "{{ omit }}" + +openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}" +openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" +openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}" +openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry' +openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}" +openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" +openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}" +openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}" +openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}" +openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}" +openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}" +openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}" +openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" +openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" +openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" +openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" +openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml new file mode 100644 index 000000000..fa5fa2cb0 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -0,0 +1,166 @@ +--- +- name: Verify target namespace exists + oc_project: + state: present + name: "{{ glusterfs_namespace }}" + when: glusterfs_is_native or glusterfs_heketi_is_native + +- include: glusterfs_deploy.yml + when: glusterfs_is_native + +- name: Make sure heketi-client is installed + package: name=heketi-client state=present + +- name: Delete pre-existing heketi resources + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: "{{ item.kind }}" + name: "{{ item.name | default(omit) }}" + selector: "{{ item.selector | default(omit) }}" + state: absent + with_items: + - kind: "template,route,service,dc,jobs,secret" + selector: "deploy-heketi" + - kind: "template,route,service,dc" + name: "heketi" + - kind: "svc,ep" + name: "heketi-storage-endpoints" + - kind: "sa" + name: "heketi-service-account" + failed_when: False + when: glusterfs_heketi_wipe + +- name: Wait for deploy-heketi pods to terminate + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=deploy-heketi-pod" + register: 
heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: glusterfs_heketi_wipe + +- name: Wait for heketi pods to terminate + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=heketi-pod" + register: heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: glusterfs_heketi_wipe + +- name: Create heketi service account + oc_serviceaccount: + namespace: "{{ glusterfs_namespace }}" + name: heketi-service-account + state: present + when: glusterfs_heketi_is_native + +- name: Add heketi service account to privileged SCC + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + resource_kind: scc + resource_name: privileged + state: present + when: glusterfs_heketi_is_native + +- name: Allow heketi service account to view/edit pods + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + resource_kind: role + resource_name: edit + state: present + when: glusterfs_heketi_is_native + +- name: Check for existing deploy-heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + register: heketi_pod + when: glusterfs_heketi_is_native + +- name: Check if need to deploy deploy-heketi + set_fact: + glusterfs_heketi_deploy_is_missing: False + when: + - "glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- name: Check for existing heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=heketi-pod" + register: heketi_pod + when: glusterfs_heketi_is_native + +- name: Check if need to deploy heketi + set_fact: + glusterfs_heketi_is_missing: False + when: + - "glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- include: heketi_deploy_part1.yml + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_deploy_is_missing + - glusterfs_heketi_is_missing + +- name: Determine heketi URL + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: ep + selector: "glusterfs in (deploy-heketi-service, heketi-service)" + register: heketi_url + until: + - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" + - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_url is undefined + +- name: Set heketi URL + set_fact: + glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + when: + - 
glusterfs_heketi_is_native + - glusterfs_heketi_url is undefined + +- name: Verify heketi service + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list" + changed_when: False + +- name: Generate topology file + template: + src: "{{ openshift.common.examples_content_version }}/topology.json.j2" + dest: "{{ mktemp.stdout }}/topology.json" + when: + - glusterfs_heketi_topology_load + +- name: Load heketi topology + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" + register: topology_load + failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" + when: + - glusterfs_heketi_topology_load + +- include: heketi_deploy_part2.yml + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_is_missing diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml new file mode 100644 index 000000000..451990240 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -0,0 +1,22 @@ +--- +- set_fact: + glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}" + glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}" + glusterfs_image: "{{ openshift_storage_glusterfs_image }}" + glusterfs_version: "{{ openshift_storage_glusterfs_version }}" + glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}" + glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}" + glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}" + glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}" + glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" + glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}" + glusterfs_nodes: "{{ g_glusterfs_hosts }}" + +- include: glusterfs_common.yml diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 2b35e5137..579112349 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -1,44 +1,44 @@ --- - assert: - that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1" + that: "glusterfs_nodeselector.keys() | count == 1" msg: Only one GlusterFS nodeselector key pair should be provided - assert: - that: "groups.oo_glusterfs_to_config | count >= 3" + that: "glusterfs_nodes | count >= 3" msg: There must be at least three GlusterFS nodes specified - name: Delete pre-existing GlusterFS resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "template,daemonset" name: glusterfs state: absent - when: openshift_storage_glusterfs_wipe + when: 
glusterfs_wipe - name: Unlabel any existing GlusterFS nodes oc_label: name: "{{ item }}" kind: node state: absent - labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" with_items: "{{ groups.all }}" - when: openshift_storage_glusterfs_wipe + when: glusterfs_wipe - name: Delete pre-existing GlusterFS config file: path: /var/lib/glusterd state: absent delegate_to: "{{ item }}" - with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}" - when: openshift_storage_glusterfs_wipe + with_items: "{{ glusterfs_nodes | default([]) }}" + when: glusterfs_wipe - name: Get GlusterFS storage devices state command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}" register: devices_info delegate_to: "{{ item }}" - with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}" + with_items: "{{ glusterfs_nodes | default([]) }}" failed_when: False - when: openshift_storage_glusterfs_wipe + when: glusterfs_wipe # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume. - name: Clear GlusterFS storage device contents @@ -46,12 +46,12 @@ delegate_to: "{{ item.item }}" with_items: "{{ devices_info.results }}" when: - - openshift_storage_glusterfs_wipe + - glusterfs_wipe - item.stdout_lines | count > 0 - name: Add service accounts to privileged SCC oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}" + user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" resource_kind: scc resource_name: privileged state: present @@ -64,8 +64,8 @@ name: "{{ glusterfs_host }}" kind: node state: add - labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" - with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}" + labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + with_items: "{{ glusterfs_nodes | default([]) }}" loop_control: loop_var: glusterfs_host @@ -76,7 +76,7 @@ - name: Create GlusterFS template oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: glusterfs state: present @@ -85,16 +85,16 @@ - name: Deploy GlusterFS pods oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "glusterfs" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}" + IMAGE_NAME: "{{ glusterfs_image }}" + IMAGE_VERSION: "{{ glusterfs_version }}" - name: Wait for GlusterFS pods oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs-node=pod" @@ -102,6 +102,6 @@ until: - "glusterfs_pods.results.results[0]['items'] | count > 0" # There must be as many pods with 'Ready' status True as there are nodes expecting those pods - - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count" + - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count" delay: 10 - retries: "{{ 
(openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 6d02d2090..392f4b65b 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -1,7 +1,30 @@ --- +- set_fact: + glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}" + glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}" + glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" + glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" + glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}" + glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}" + glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}" + glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}" + glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}" + glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}" + glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}" + glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}" + +- include: glusterfs_common.yml + when: g_glusterfs_registry_hosts != g_glusterfs_hosts + - name: Delete pre-existing GlusterFS registry resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "{{ item.kind }}" name: "{{ item.name | default(omit) }}" selector: "{{ item.selector | default(omit) }}" @@ -23,7 +46,7 @@ - name: Create GlusterFS registry endpoints oc_obj: - namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + namespace: "{{ glusterfs_namespace }}" state: present kind: endpoints name: glusterfs-registry-endpoints @@ -32,7 +55,7 @@ - name: Create GlusterFS registry service oc_obj: - namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + namespace: "{{ glusterfs_namespace }}" state: present kind: service name: glusterfs-registry-endpoints @@ -40,9 +63,9 @@ - "{{ mktemp.stdout }}/glusterfs-registry-service.yml" - name: Check if GlusterFS registry volume exists - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list" register: registry_volume - name: Create GlusterFS registry volume - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | 
replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" - when: "'openshift.hosted.registry.storage.glusterfs.path' not in registry_volume.stdout" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" + when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml index 76ae1db75..c14fcfb15 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml @@ -8,7 +8,7 @@ - name: Create deploy-heketi resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: deploy-heketi state: present @@ -17,18 +17,18 @@ - name: Deploy deploy-heketi pod oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "deploy-heketi" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" - HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" - HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + IMAGE_NAME: "{{ glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" - name: Wait for deploy-heketi pod oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" @@ -38,4 +38,4 @@ # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 778b5a673..64410a9ab 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,6 +1,6 @@ --- - name: Create heketi DB volume - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" register: setup_storage failed_when: False @@ -13,12 +13,12 @@ # Need `command` here because heketi-storage.json contains multiple objects. 
- name: Copy heketi DB to GlusterFS volume - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}" + command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}" when: setup_storage.rc == 0 - name: Wait for copy job to finish oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: job state: list name: "heketi-storage-copy-job" @@ -28,7 +28,7 @@ # Pod's 'Complete' status must be True - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" failed_when: - "'results' in heketi_job.results" - "heketi_job.results.results | count > 0" @@ -38,7 +38,7 @@ - name: Delete deploy resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "{{ item.kind }}" name: "{{ item.name | default(omit) }}" selector: "{{ item.selector | default(omit) }}" @@ -55,7 +55,7 @@ - name: Create heketi resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: heketi state: present @@ -64,18 +64,18 @@ - name: Deploy heketi pod oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "heketi" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" - HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" - HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + IMAGE_NAME: "{{ glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" - name: Wait for heketi pod oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs=heketi-pod" @@ -85,11 +85,11 @@ # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" - name: Determine heketi URL oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" state: list kind: ep selector: "glusterfs=heketi-service" @@ -98,12 +98,12 @@ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" - name: Set heketi URL set_fact: - openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + 
glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" - name: Verify heketi service - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list" changed_when: False diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml index 71c4a2732..ebd8db453 100644 --- a/roles/openshift_storage_glusterfs/tasks/main.yml +++ b/roles/openshift_storage_glusterfs/tasks/main.yml @@ -5,174 +5,14 @@ changed_when: False check_mode: no -- name: Verify target namespace exists - oc_project: - state: present - name: "{{ openshift_storage_glusterfs_namespace }}" - when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native - -- include: glusterfs_deploy.yml - when: openshift_storage_glusterfs_is_native - -- name: Make sure heketi-client is installed - package: name=heketi-client state=present - -- name: Delete pre-existing heketi resources - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - kind: "{{ item.kind }}" - name: "{{ item.name | default(omit) }}" - selector: "{{ item.selector | default(omit) }}" - state: absent - with_items: - - kind: "template,route,service,jobs,dc,secret" - selector: "deploy-heketi" - - kind: "template,route,dc,service" - name: "heketi" - - kind: "svc,ep" - name: "heketi-storage-endpoints" - - kind: "sa" - name: "heketi-service-account" - failed_when: False - when: openshift_storage_glusterfs_heketi_wipe - -- name: Wait for deploy-heketi pods to terminate - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - kind: pod - state: list - selector: "glusterfs=deploy-heketi-pod" - register: heketi_pod - until: "heketi_pod.results.results[0]['items'] | count == 0" - delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" - when: openshift_storage_glusterfs_heketi_wipe - -- name: Wait for heketi pods to terminate - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - kind: pod - state: list - selector: "glusterfs=heketi-pod" - register: heketi_pod - until: "heketi_pod.results.results[0]['items'] | count == 0" - delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" - when: openshift_storage_glusterfs_heketi_wipe - -- name: Create heketi service account - oc_serviceaccount: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - name: heketi-service-account - state: present - when: openshift_storage_glusterfs_heketi_is_native - -- name: Add heketi service account to privileged SCC - oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" - resource_kind: scc - resource_name: privileged - state: present - when: openshift_storage_glusterfs_heketi_is_native - -- name: Allow heketi service account to view/edit pods - oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" - resource_kind: role - resource_name: edit - state: present - when: openshift_storage_glusterfs_heketi_is_native - -- name: Check for existing deploy-heketi pod - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - state: list - kind: pod - selector: 
"glusterfs=deploy-heketi-pod,deploy-heketi=support" - register: heketi_pod - when: openshift_storage_glusterfs_heketi_is_native - -- name: Check if need to deploy deploy-heketi - set_fact: - openshift_storage_glusterfs_heketi_deploy_is_missing: False - when: - - "openshift_storage_glusterfs_heketi_is_native" - - "heketi_pod.results.results[0]['items'] | count > 0" - # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - -- name: Check for existing heketi pod - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - state: list - kind: pod - selector: "glusterfs=heketi-pod" - register: heketi_pod - when: openshift_storage_glusterfs_heketi_is_native - -- name: Check if need to deploy heketi - set_fact: - openshift_storage_glusterfs_heketi_is_missing: False +- include: glusterfs_config.yml when: - - "openshift_storage_glusterfs_heketi_is_native" - - "heketi_pod.results.results[0]['items'] | count > 0" - # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - -- include: heketi_deploy_part1.yml - when: - - openshift_storage_glusterfs_heketi_is_native - - openshift_storage_glusterfs_heketi_deploy_is_missing - - openshift_storage_glusterfs_heketi_is_missing - -- name: Determine heketi URL - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - state: list - kind: ep - selector: "glusterfs in (deploy-heketi-service, heketi-service)" - register: heketi_url - until: - - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" - - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" - delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" - when: - - openshift_storage_glusterfs_heketi_is_native - - openshift_storage_glusterfs_heketi_url is undefined - -- name: Set heketi URL - set_fact: - openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" - when: - - openshift_storage_glusterfs_heketi_is_native - - openshift_storage_glusterfs_heketi_url is undefined - -- name: Verify heketi service - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" - changed_when: False - -- name: Generate topology file - template: - src: "{{ openshift.common.examples_content_version }}/topology.json.j2" - dest: "{{ mktemp.stdout }}/topology.json" - when: - - openshift_storage_glusterfs_is_native - - openshift_storage_glusterfs_heketi_topology_load - -- name: Load heketi topology - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" - register: topology_load - failed_when: topology_load.rc != 0 or 'Unable' in topology_load.stdout - when: - - openshift_storage_glusterfs_is_native - - openshift_storage_glusterfs_heketi_topology_load - -- include: 
heketi_deploy_part2.yml - when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing + - g_glusterfs_hosts | default([]) | count > 0 - include: glusterfs_registry.yml - when: openshift.hosted.registry.storage.kind == 'glusterfs' + when: + - g_glusterfs_registry_hosts | default([]) | count > 0 + - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap" - name: Delete temp directory file: diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 index d72d085c9..605627ab5 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 @@ -4,7 +4,7 @@ metadata: name: glusterfs-registry-endpoints subsets: - addresses: -{% for node in groups.oo_glusterfs_to_config %} +{% for node in glusterfs_nodes %} - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} {% endfor %} ports: diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 index eb5b4544f..33d8f9b36 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 @@ -1,7 +1,7 @@ { "clusters": [ {%- set clusters = {} -%} -{%- for node in groups.oo_glusterfs_to_config -%} +{%- for node in glusterfs_nodes -%} {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%} {%- if cluster in clusters -%} {%- set _dummy = clusters[cluster].append(node) -%} diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md index 43db3cc74..e7ef544f4 100644 --- a/roles/os_firewall/README.md +++ b/roles/os_firewall/README.md @@ -17,7 +17,7 @@ Role Variables | Name | Default | | |---------------------------|---------|----------------------------------------| -| os_firewall_use_firewalld | True | If false, use iptables | +| os_firewall_use_firewalld | False | If false, use iptables | | os_firewall_allow | [] | List of service,port mappings to allow | | os_firewall_deny | [] | List of service, port mappings to deny | diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml index 4c544122f..01859e5fc 100644 --- a/roles/os_firewall/defaults/main.yml +++ b/roles/os_firewall/defaults/main.yml @@ -2,6 +2,6 @@ os_firewall_enabled: True # firewalld is not supported on Atomic Host # https://bugzilla.redhat.com/show_bug.cgi?id=1403331 -os_firewall_use_firewalld: "{{ False if openshift.common.is_atomic | bool else True }}" +os_firewall_use_firewalld: "{{ False }}" os_firewall_allow: [] os_firewall_deny: []
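A note on the repeated `re_key` hunks in the `lib_openshift` modules and `yedit`: the change swaps the `%s` placeholder inside the character class for `{}`, so the pattern can be completed with `str.format()` instead of `%`-interpolation. A minimal sketch of the assumed usage (the helper below is illustrative, not the module's actual `parse_key`):

```python
import re

# '{}' is filled with str.format(); the old '%s' form required
# %-interpolation and breaks once the caller switches to .format().
RE_KEY = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
COM_SEP = set(['.', '#', '|', ':'])

def parse_key(key, sep='.'):
    ''' Split a yedit key such as "spec.template[0].name" into its parts '''
    extra_seps = ''.join(COM_SEP - set([sep]))
    # The unused separator characters land inside the character class.
    return re.findall(RE_KEY.format(extra_seps), key)

print(parse_key('spec.template[0].name'))
# [('', 'spec'), ('', 'template'), ('0', ''), ('', 'name')]
```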
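The `oc_obj` hunk is a one-call fix of the same flavor: `api_rval['results'][0]` is a plain dict, and dicts have no `.getattr()` method, so the old absence check raised `AttributeError` instead of returning. A short reproduction with hypothetical data:

```python
api_rval = {'results': [{'items': []}]}

# Old code: calling a method plain dicts do not have.
try:
    api_rval['results'][0].getattr('items', [])
except AttributeError as err:
    print(err)  # 'dict' object has no attribute 'getattr'

# Fixed code: the standard dict accessor with a default.
print(len(api_rval['results'][0].get('items', [])) == 0)  # True
```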
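The new `openshift_logging_fluentd_use_journal` default is a single chained Jinja conditional that is hard to audit inline. It encodes a precedence order, restated below in plain Python; the flat `hostvars` dict and the last two key names are stand-ins for the nested `openshift.docker.*` facts:

```python
def fluentd_use_journal(hostvars):
    ''' Assumed precedence: explicit override, then the two log-driver
        facts, then a scan of raw docker options, otherwise unset
        (the Jinja chain ends in omit). '''
    if 'openshift_hosted_logging_use_journal' in hostvars:
        return hostvars['openshift_hosted_logging_use_journal']
    if 'docker_log_driver' in hostvars:
        return hostvars['docker_log_driver'] == 'journald'
    if 'openshift_docker_log_driver' in hostvars:  # stand-in for openshift.docker.log_driver
        return hostvars['openshift_docker_log_driver'] == 'journald'
    if 'openshift_docker_options' in hostvars:  # stand-in for openshift.docker.options
        return '--log-driver=journald' in hostvars['openshift_docker_options']
    return None

print(fluentd_use_journal({'docker_log_driver': 'journald'}))  # True
```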
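Both "Determine heketi URL" tasks poll an endpoints object until `subsets[0]` carries a non-empty address and port, and "Set heketi URL" then joins them as `ip:port`. A sketch of that extraction against a minimally shaped endpoints result (structure inferred from the task's lookups):

```python
def heketi_url(endpoints_result):
    ''' Mirror the subsets[0].addresses[0].ip / ports[0].port lookups '''
    ep = endpoints_result['items'][0]
    ip = ep['subsets'][0]['addresses'][0]['ip']
    port = ep['subsets'][0]['ports'][0]['port']
    if not ip or not port:
        # The tasks retry here (delay 10, timeout/10 attempts) instead.
        raise ValueError('endpoints not ready yet')
    return '{0}:{1}'.format(ip, port)

sample = {'items': [{'subsets': [{'addresses': [{'ip': '10.1.2.3'}],
                                  'ports': [{'port': 8080}]}]}]}
print(heketi_url(sample))  # 10.1.2.3:8080
```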
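Finally, `topology.json.j2` groups `glusterfs_nodes` into clusters keyed by an optional `glusterfs_cluster` host variable, defaulting to cluster '1'. (Note that the template tests `'glusterfs_cluster' in node`, i.e. membership in the hostname string; `in hostvars[node]` looks like the intent.) The grouping it aims for, sketched:

```python
def group_clusters(glusterfs_nodes, hostvars):
    ''' Group node names by an optional glusterfs_cluster id, default "1" '''
    clusters = {}
    for node in glusterfs_nodes:
        cluster = hostvars[node].get('glusterfs_cluster', '1')
        clusters.setdefault(cluster, []).append(node)
    return clusters

print(group_clusters(['node0', 'node1', 'node2'],
                     {'node0': {'glusterfs_cluster': '2'},
                      'node1': {}, 'node2': {}}))
# {'2': ['node0'], '1': ['node1', 'node2']}
```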