author | OpenShift Bot <eparis+openshiftbot@redhat.com> | 2017-05-10 11:02:00 -0500
committer | GitHub <noreply@github.com> | 2017-05-10 11:02:00 -0500
commit | c8e8f8f7b506de67c0beb92534b303451938aeba (patch)
tree | 79617833ce7ec83925473af9b5c2de6b227476e9
parent | 5a4365e765e16a4401d10f0bd42a7d3e194d4ab0 (diff)
parent | 483870112d3a079709b231f1128b979fc5e60c52 (diff)
Merge pull request #3969 from jarrpa/glusterfs-registry-too
Merged by openshift-bot
28 files changed, 607 insertions, 233 deletions
diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.native-glusterfs.example
new file mode 100644
index 000000000..2dbb57d40
--- /dev/null
+++ b/inventory/byo/hosts.byo.native-glusterfs.example
@@ -0,0 +1,51 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master node=True storage=True master=True
+
+[nodes]
+master node=True storage=True master=True openshift_schedulable=False
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that is intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index cb878036d..6ec8b9317 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -438,9 +438,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
 #openshift_hosted_registry_storage_volume_size=10Gi
 #
-# Native GlusterFS Registry Storage
-#openshift_hosted_registry_storage_kind=glusterfs
-#
 # AWS S3
 # S3 bucket must already exist.
 #openshift_hosted_registry_storage_kind=object
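Since the `glusterfs_devices` entries above must be bare block devices, a quick pre-flight check can save a failed run. A minimal sketch, not part of this PR — a hypothetical helper playbook that reuses the `glusterfs` group and per-host `glusterfs_devices` variable from the example above:

```
---
# check-devices.yml -- hypothetical pre-flight check, not part of this PR.
# "blkid -p" exits 0 when it finds a filesystem or LVM signature on a
# device and 2 when the device is bare, so a bare device must not return 0.
- hosts: glusterfs
  become: yes
  tasks:
  - name: Fail if a GlusterFS device already carries a signature
    command: "blkid -p {{ item }}"
    register: blkid_out
    failed_when: blkid_out.rc == 0
    changed_when: False
    with_items: "{{ glusterfs_devices }}"
```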
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 06635d69c..05945f586 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -438,9 +438,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
 #openshift_hosted_registry_storage_volume_size=10Gi
 #
-# Native GlusterFS Registry Storage
-#openshift_hosted_registry_storage_kind=glusterfs
-#
 # AWS S3
 #
 # S3 bucket must already exist.
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index 268a65415..9d086b7b6 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -15,6 +15,8 @@ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
 
 g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
 
+g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}"
+
 g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts) | union(g_new_node_hosts)| union(g_new_master_hosts)
diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/byo/openshift-glusterfs/README.md
new file mode 100644
index 000000000..f62aea229
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/README.md
@@ -0,0 +1,98 @@
+# OpenShift GlusterFS Playbooks
+
+These playbooks are intended to enable the use of GlusterFS volumes by pods in
+OpenShift. While they try to provide a sane set of defaults they do cover a
+variety of scenarios and configurations, so read carefully. :)
+
+## Playbook: config.yml
+
+This is the main playbook that integrates GlusterFS into a new or existing
+OpenShift cluster. It will also, if specified, configure a hosted Docker
+registry with GlusterFS backend storage.
+
+This playbook requires the `glusterfs` group to exist in the Ansible inventory
+file. The hosts in this group are the nodes of the GlusterFS cluster.
+
+ * If this is a newly configured cluster, each host must have a
+   `glusterfs_devices` variable defined, each of which must be a list of block
+   storage devices intended for use only by the GlusterFS cluster. If this is
+   also an external GlusterFS cluster, you must specify
+   `openshift_storage_glusterfs_is_native=False`. If the cluster is to be
+   managed by an external heketi service you must also specify
+   `openshift_storage_glusterfs_heketi_is_native=False` and
+   `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi
+   service. All these variables are specified in `[OSEv3:vars]`.
+ * If this is an existing cluster, you do not need to specify a list of block
+   devices but you must specify the following variables in `[OSEv3:vars]`:
+   * `openshift_storage_glusterfs_is_missing=False`
+   * `openshift_storage_glusterfs_heketi_is_missing=False`
+
+By default, pods for a native GlusterFS cluster will be created in the
+`default` namespace. To change this, specify
+`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`.
+
+To configure the deployment of a Docker registry with GlusterFS backend
+storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in
+`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the
+registry, specify a `glusterfs_registry` group that is populated in the same
+way as the `glusterfs` group, with the nodes for the separate cluster. If no
+`glusterfs_registry` group is specified, the cluster defined by the `glusterfs`
+group will be used.
+
+To swap an existing hosted registry's backend storage for a GlusterFS volume,
+specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To
+additionally copy any existing contents from an existing hosted registry,
+specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`.
+
+**NOTE:** For each namespace that is to have access to GlusterFS volumes, an
+Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding
+Service resource must be created. If dynamic provisioning using StorageClasses
+is configured, these resources are created automatically in the namespaces that
+require them. This playbook also takes care of creating these resources in the
+namespaces used for deployment.
+
+An example of a minimal inventory file:
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+node0
+node1
+node2
+
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/sdb" ]'
+node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]'
+node2 glusterfs_devices='[ "/dev/sdd" ]'
+```
+
+## Playbook: registry.yml
+
+This playbook is intended for admins who want to deploy a hosted Docker
+registry with GlusterFS backend storage on an existing OpenShift cluster. It
+has all the same requirements and behaviors as `config.yml`.
+
+## Role: openshift_storage_glusterfs
+
+The bulk of the work is done by the `openshift_storage_glusterfs` role. This
+role can handle the deployment of GlusterFS (if it is to be hosted on the
+OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone),
+and (if specified) integration as backend storage for a hosted Docker registry.
+
+See the documentation in the role's directory for further details.
+
+## Role: openshift_hosted
+
+The `openshift_hosted` role recognizes `glusterfs` as a possible storage
+backend for a hosted Docker registry. It will also, if configured, handle the
+swap of an existing registry's backend storage to a GlusterFS volume.
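For illustration, the registry-storage swap described in the README above could also be expressed as group variables — a minimal sketch, assuming the standard `OSEv3` group name and the documented defaults (swap defaults to False, swapcopy to True):

```
# group_vars/OSEv3.yml -- hypothetical placement; equivalent to setting
# these variables in [OSEv3:vars] as the README describes.
openshift_hosted_registry_storage_kind: glusterfs
openshift_hosted_registry_storage_glusterfs_swap: True
openshift_hosted_registry_storage_glusterfs_swapcopy: True
```

Likewise, the Endpoints and Service pair mentioned in the README's note looks roughly like the following. The IP addresses are placeholders; the resource name matches the `glusterfs-registry-endpoints` objects this PR's templates create, and the port value is nominal since GlusterFS does not use it:

```
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-registry-endpoints
subsets:
- addresses:
  - ip: 192.168.10.11   # placeholder GlusterFS node IPs
  - ip: 192.168.10.12
  - ip: 192.168.10.13
  ports:
  - port: 1
---
apiVersion: v1
kind: Service
metadata:
  name: glusterfs-registry-endpoints
spec:
  ports:
  - port: 1
```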
diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..3f11f3991
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/config.yml
@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-glusterfs/config.yml
diff --git a/playbooks/byo/openshift-glusterfs/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..6ee6febdb
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/registry.yml
@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-glusterfs/registry.yml
diff --git a/playbooks/byo/openshift-glusterfs/roles b/playbooks/byo/openshift-glusterfs/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 17a177644..46932b27f 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -155,5 +155,5 @@
       groups: oo_glusterfs_to_config
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
-    with_items: "{{ g_glusterfs_hosts | default([]) }}"
+    with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}"
     changed_when: no
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 75faf5ba8..1efdfb336 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -12,7 +12,9 @@
     - service: glusterfs_bricks
       port: "49152-49251/tcp"
   roles:
-  - os_firewall
+  - role: os_firewall
+    when:
+    - openshift_storage_glusterfs_is_native | default(True)
 
 - name: Configure GlusterFS
   hosts: oo_first_master
diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..80cf7529e
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/registry.yml
@@ -0,0 +1,49 @@
+---
+- include: config.yml
+
+- name: Initialize GlusterFS registry PV and PVC vars
+  hosts: oo_first_master
+  tags: hosted
+  tasks:
+  - set_fact:
+      glusterfs_pv: []
+      glusterfs_pvc: []
+
+  - set_fact:
+      glusterfs_pv:
+      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
+        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+        storage:
+          glusterfs:
+            endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
+            path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
+            readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
+      glusterfs_pvc:
+      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+    when: openshift.hosted.registry.storage.glusterfs.swap
+
+- name: Create persistent volumes
+  hosts: oo_first_master
+  tags:
+  - hosted
+  vars:
+    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
+    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
+  roles:
+  - role: openshift_persistent_volumes
+    when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+
+- name: Create Hosted Resources
+  hosts: oo_first_master
+  tags:
+  - hosted
+  pre_tasks:
+  - set_fact:
+      openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+      openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+    when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+  roles:
+  - role: openshift_hosted
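For reference, with the role's documented defaults (volume name `registry`, endpoints `glusterfs-registry-endpoints`, path `glusterfs-registry-volume`, access mode `ReadWriteMany`), the `glusterfs_pv` and `glusterfs_pvc` definitions above would expand to objects roughly like these — a sketch only, with an illustrative 10Gi size standing in for `openshift.hosted.registry.storage.volume.size`:

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: registry-glusterfs-volume
spec:
  capacity:
    storage: 10Gi        # illustrative value
  accessModes:
  - ReadWriteMany
  glusterfs:
    endpoints: glusterfs-registry-endpoints
    path: glusterfs-registry-volume
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-glusterfs-claim
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 10Gi      # illustrative value
```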
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 911d72412..914e46c05 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -2167,7 +2167,9 @@ class OpenShiftFacts(object):
                 glusterfs=dict(
                     endpoints='glusterfs-registry-endpoints',
                     path='glusterfs-registry-volume',
-                    readOnly=False),
+                    readOnly=False,
+                    swap=False,
+                    swapcopy=True),
                 host=None,
                 access=dict(
                     modes=['ReadWriteMany']
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index 6d576df71..3e5d7f860 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -28,6 +28,14 @@ From this role:
 | openshift_hosted_registry_selector | region=infra | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. |
 | openshift_hosted_registry_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. |
 
+If you specify `openshift_hosted_registry_storage_kind=glusterfs`, the following
+variables also control configuration behavior:
+
+| Name | Default value | Description |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, also copy the current contents of the registry volume |
+
 Dependencies
 ------------
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 6e691c26f..751489958 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -61,7 +61,7 @@
     name: "{{ openshift_hosted_registry_serviceaccount }}"
     namespace: "{{ openshift_hosted_registry_namespace }}"
 
-- name: Grant the registry serivce account access to the appropriate scc
+- name: Grant the registry service account access to the appropriate scc
   oc_adm_policy_user:
     user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
     namespace: "{{ openshift_hosted_registry_namespace }}"
@@ -126,4 +126,4 @@
 
 - include: storage/glusterfs.yml
   when:
-  - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs'
+  - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
index b18b24266..e6bb196b8 100644
--- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -1,10 +1,18 @@
 ---
+- name: Get registry DeploymentConfig
+  oc_obj:
+    namespace: "{{ openshift_hosted_registry_namespace }}"
+    state: list
+    kind: dc
+    name: "{{ openshift_hosted_registry_name }}"
+  register: registry_dc
+
 - name: Wait for registry pods
   oc_obj:
     namespace: "{{ openshift_hosted_registry_namespace }}"
     state: list
     kind: pod
-    selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}"
+    selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}"
   register: registry_pods
   until:
   - "registry_pods.results.results[0]['items'] | count > 0"
@@ -38,6 +46,39 @@
     mode: "2775"
     recurse: True
 
+- block:
+  - name: Activate registry maintenance mode
+    oc_env:
+      namespace: "{{ openshift_hosted_registry_namespace }}"
+      name: "{{ openshift_hosted_registry_name }}"
+      env_vars:
+      - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+
+  - name: Get first registry pod name
+    set_fact:
+      registry_pod_name: "{{ registry_pods.results.results[0]['items'][0].metadata.name }}"
+
+  - name: Copy current registry contents to new GlusterFS volume
+    command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/"
+    when: openshift.hosted.registry.storage.glusterfs.swapcopy
+
+  - name: Swap new GlusterFS registry volume
+    oc_volume:
+      namespace: "{{ openshift_hosted_registry_namespace }}"
+      name: "{{ openshift_hosted_registry_name }}"
+      vol_name: registry-storage
+      mount_type: pvc
+      claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+
+  - name: Deactivate registry maintenance mode
+    oc_env:
+      namespace: "{{ openshift_hosted_registry_namespace }}"
+      name: "{{ openshift_hosted_registry_name }}"
+      state: absent
+      env_vars:
+      - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+  when: openshift.hosted.registry.storage.glusterfs.swap
+
 - name: Unmount registry volume
   mount:
     state: unmounted
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index cf0fb94c9..7b310dbf8 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -8,10 +8,24 @@ Requirements
 
 * Ansible 2.2
 
+Host Groups
+-----------
+
+The following group is expected to be populated for this role to run:
+
+* `[glusterfs]`
+
+Additionally, the following group may be specified either in addition to or
+instead of the above group to deploy a GlusterFS cluster for use by a natively
+hosted Docker registry:
+
+* `[glusterfs_registry]`
+
 Role Variables
 --------------
 
-From this role:
+This role has the following variables that control the integration of a
+GlusterFS cluster into a new or existing OpenShift cluster:
 
 | Name | Default value | |
 |--------------------------------------------------|-------------------------|-----------------------------------------|
@@ -31,6 +45,25 @@
 | openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode |
 | openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` |
 
+Each role variable also has a corresponding variable to optionally configure a
+separate GlusterFS cluster for use as storage for an integrated Docker
+registry. These variables start with the prefix
+`openshift_storage_glusterfs_registry_` and, for the most part, default to the
+values in their corresponding non-registry variables. The following variables
+are an exception:
+
+| Name | Default value | |
+|---------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default' |
+| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry' | This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters |
+
+Additionally, this role's behavior responds to the following registry-specific
+variable:
+
+| Name | Default value | Description |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+
 Dependencies
 ------------
 
@@ -47,6 +80,7 @@ Example Playbook
   hosts: oo_first_master
   roles:
   - role: openshift_storage_glusterfs
+    when: groups.oo_glusterfs_to_config | default([]) | count > 0
 ```
 
 License
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index ade850747..ebe9ca30b 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -2,7 +2,7 @@
 openshift_storage_glusterfs_timeout: 300
 openshift_storage_glusterfs_namespace: 'default'
 openshift_storage_glusterfs_is_native: True
-openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
 openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
 openshift_storage_glusterfs_version: 'latest'
 openshift_storage_glusterfs_wipe: False
@@ -15,3 +15,22 @@ openshift_storage_glusterfs_heketi_admin_key: ''
 openshift_storage_glusterfs_heketi_user_key: ''
 openshift_storage_glusterfs_heketi_topology_load: True
 openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_heketi_url: "{{ omit }}"
+
+openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
+openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
+openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" +openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" +openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" +openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" +openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml new file mode 100644 index 000000000..fa5fa2cb0 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -0,0 +1,166 @@ +--- +- name: Verify target namespace exists + oc_project: + state: present + name: "{{ glusterfs_namespace }}" + when: glusterfs_is_native or glusterfs_heketi_is_native + +- include: glusterfs_deploy.yml + when: glusterfs_is_native + +- name: Make sure heketi-client is installed + package: name=heketi-client state=present + +- name: Delete pre-existing heketi resources + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: "{{ item.kind }}" + name: "{{ item.name | default(omit) }}" + selector: "{{ item.selector | default(omit) }}" + state: absent + with_items: + - kind: "template,route,service,dc,jobs,secret" + selector: "deploy-heketi" + - kind: "template,route,service,dc" + name: "heketi" + - kind: "svc,ep" + name: "heketi-storage-endpoints" + - kind: "sa" + name: "heketi-service-account" + failed_when: False + when: glusterfs_heketi_wipe + +- name: Wait for deploy-heketi pods to terminate + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=deploy-heketi-pod" + register: heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: glusterfs_heketi_wipe + +- name: Wait for heketi pods to terminate + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=heketi-pod" + register: heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: glusterfs_heketi_wipe + +- name: Create heketi service account + oc_serviceaccount: + namespace: "{{ glusterfs_namespace }}" + name: heketi-service-account + state: present + when: glusterfs_heketi_is_native + +- name: Add heketi service account to privileged SCC + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + resource_kind: scc + resource_name: privileged + state: present + when: glusterfs_heketi_is_native + +- name: Allow heketi service account to view/edit pods + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + resource_kind: role + resource_name: edit + state: present + when: glusterfs_heketi_is_native + +- name: Check for existing deploy-heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + register: heketi_pod + when: glusterfs_heketi_is_native + +- name: Check if need to deploy deploy-heketi + set_fact: + glusterfs_heketi_deploy_is_missing: False + when: + - "glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # deploy-heketi is not 
missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- name: Check for existing heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=heketi-pod" + register: heketi_pod + when: glusterfs_heketi_is_native + +- name: Check if need to deploy heketi + set_fact: + glusterfs_heketi_is_missing: False + when: + - "glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- include: heketi_deploy_part1.yml + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_deploy_is_missing + - glusterfs_heketi_is_missing + +- name: Determine heketi URL + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: ep + selector: "glusterfs in (deploy-heketi-service, heketi-service)" + register: heketi_url + until: + - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" + - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_url is undefined + +- name: Set heketi URL + set_fact: + glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_url is undefined + +- name: Verify heketi service + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list" + changed_when: False + +- name: Generate topology file + template: + src: "{{ openshift.common.examples_content_version }}/topology.json.j2" + dest: "{{ mktemp.stdout }}/topology.json" + when: + - glusterfs_heketi_topology_load + +- name: Load heketi topology + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" + register: topology_load + failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" + when: + - glusterfs_heketi_topology_load + +- include: heketi_deploy_part2.yml + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_is_missing diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml new file mode 100644 index 000000000..451990240 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -0,0 +1,22 @@ +--- +- set_fact: + glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}" + glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}" + glusterfs_image: "{{ openshift_storage_glusterfs_image }}" + glusterfs_version: "{{ openshift_storage_glusterfs_version }}" + glusterfs_wipe: "{{ 
openshift_storage_glusterfs_wipe }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}" + glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}" + glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}" + glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}" + glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" + glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}" + glusterfs_nodes: "{{ g_glusterfs_hosts }}" + +- include: glusterfs_common.yml diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 2b35e5137..579112349 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -1,44 +1,44 @@ --- - assert: - that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1" + that: "glusterfs_nodeselector.keys() | count == 1" msg: Only one GlusterFS nodeselector key pair should be provided - assert: - that: "groups.oo_glusterfs_to_config | count >= 3" + that: "glusterfs_nodes | count >= 3" msg: There must be at least three GlusterFS nodes specified - name: Delete pre-existing GlusterFS resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "template,daemonset" name: glusterfs state: absent - when: openshift_storage_glusterfs_wipe + when: glusterfs_wipe - name: Unlabel any existing GlusterFS nodes oc_label: name: "{{ item }}" kind: node state: absent - labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" with_items: "{{ groups.all }}" - when: openshift_storage_glusterfs_wipe + when: glusterfs_wipe - name: Delete pre-existing GlusterFS config file: path: /var/lib/glusterd state: absent delegate_to: "{{ item }}" - with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}" - when: openshift_storage_glusterfs_wipe + with_items: "{{ glusterfs_nodes | default([]) }}" + when: glusterfs_wipe - name: Get GlusterFS storage devices state command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}" register: devices_info delegate_to: "{{ item }}" - with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}" + with_items: "{{ glusterfs_nodes | default([]) }}" failed_when: False - when: openshift_storage_glusterfs_wipe + when: glusterfs_wipe # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume. 
- name: Clear GlusterFS storage device contents @@ -46,12 +46,12 @@ delegate_to: "{{ item.item }}" with_items: "{{ devices_info.results }}" when: - - openshift_storage_glusterfs_wipe + - glusterfs_wipe - item.stdout_lines | count > 0 - name: Add service accounts to privileged SCC oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}" + user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" resource_kind: scc resource_name: privileged state: present @@ -64,8 +64,8 @@ name: "{{ glusterfs_host }}" kind: node state: add - labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" - with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}" + labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + with_items: "{{ glusterfs_nodes | default([]) }}" loop_control: loop_var: glusterfs_host @@ -76,7 +76,7 @@ - name: Create GlusterFS template oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: glusterfs state: present @@ -85,16 +85,16 @@ - name: Deploy GlusterFS pods oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "glusterfs" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}" + IMAGE_NAME: "{{ glusterfs_image }}" + IMAGE_VERSION: "{{ glusterfs_version }}" - name: Wait for GlusterFS pods oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs-node=pod" @@ -102,6 +102,6 @@ until: - "glusterfs_pods.results.results[0]['items'] | count > 0" # There must be as many pods with 'Ready' staus True as there are nodes expecting those pods - - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count" + - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 6d02d2090..392f4b65b 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -1,7 +1,30 @@ --- +- set_fact: + glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}" + glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}" + glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" + glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" + glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}" + 
glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}" + glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}" + glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}" + glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}" + glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}" + glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}" + glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}" + +- include: glusterfs_common.yml + when: g_glusterfs_registry_hosts != g_glusterfs_hosts + - name: Delete pre-existing GlusterFS registry resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "{{ item.kind }}" name: "{{ item.name | default(omit) }}" selector: "{{ item.selector | default(omit) }}" @@ -23,7 +46,7 @@ - name: Create GlusterFS registry endpoints oc_obj: - namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + namespace: "{{ glusterfs_namespace }}" state: present kind: endpoints name: glusterfs-registry-endpoints @@ -32,7 +55,7 @@ - name: Create GlusterFS registry service oc_obj: - namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + namespace: "{{ glusterfs_namespace }}" state: present kind: service name: glusterfs-registry-endpoints @@ -40,9 +63,9 @@ - "{{ mktemp.stdout }}/glusterfs-registry-service.yml" - name: Check if GlusterFS registry volume exists - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list" register: registry_volume - name: Create GlusterFS registry volume - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" - when: "'openshift.hosted.registry.storage.glusterfs.path' not in registry_volume.stdout" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" + when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml index 76ae1db75..c14fcfb15 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml @@ -8,7 +8,7 @@ - name: Create deploy-heketi resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: deploy-heketi state: present @@ -17,18 +17,18 @@ - name: Deploy deploy-heketi pod oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + 
namespace: "{{ glusterfs_namespace }}" template_name: "deploy-heketi" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" - HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" - HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + IMAGE_NAME: "{{ glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" - name: Wait for deploy-heketi pod oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" @@ -38,4 +38,4 @@ # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 778b5a673..64410a9ab 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,6 +1,6 @@ --- - name: Create heketi DB volume - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" register: setup_storage failed_when: False @@ -13,12 +13,12 @@ # Need `command` here because heketi-storage.json contains multiple objects. 
- name: Copy heketi DB to GlusterFS volume - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}" + command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}" when: setup_storage.rc == 0 - name: Wait for copy job to finish oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: job state: list name: "heketi-storage-copy-job" @@ -28,7 +28,7 @@ # Pod's 'Complete' status must be True - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" failed_when: - "'results' in heketi_job.results" - "heketi_job.results.results | count > 0" @@ -38,7 +38,7 @@ - name: Delete deploy resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "{{ item.kind }}" name: "{{ item.name | default(omit) }}" selector: "{{ item.selector | default(omit) }}" @@ -55,7 +55,7 @@ - name: Create heketi resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: heketi state: present @@ -64,18 +64,18 @@ - name: Deploy heketi pod oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "heketi" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" - HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" - HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + IMAGE_NAME: "{{ glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" - name: Wait for heketi pod oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs=heketi-pod" @@ -85,11 +85,11 @@ # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" - name: Determine heketi URL oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" state: list kind: ep selector: "glusterfs=heketi-service" @@ -98,12 +98,12 @@ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" - name: Set heketi URL set_fact: - openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + 
glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" - name: Verify heketi service - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list" changed_when: False diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml index 71c4a2732..ebd8db453 100644 --- a/roles/openshift_storage_glusterfs/tasks/main.yml +++ b/roles/openshift_storage_glusterfs/tasks/main.yml @@ -5,174 +5,14 @@ changed_when: False check_mode: no -- name: Verify target namespace exists - oc_project: - state: present - name: "{{ openshift_storage_glusterfs_namespace }}" - when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native - -- include: glusterfs_deploy.yml - when: openshift_storage_glusterfs_is_native - -- name: Make sure heketi-client is installed - package: name=heketi-client state=present - -- name: Delete pre-existing heketi resources - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - kind: "{{ item.kind }}" - name: "{{ item.name | default(omit) }}" - selector: "{{ item.selector | default(omit) }}" - state: absent - with_items: - - kind: "template,route,service,jobs,dc,secret" - selector: "deploy-heketi" - - kind: "template,route,dc,service" - name: "heketi" - - kind: "svc,ep" - name: "heketi-storage-endpoints" - - kind: "sa" - name: "heketi-service-account" - failed_when: False - when: openshift_storage_glusterfs_heketi_wipe - -- name: Wait for deploy-heketi pods to terminate - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - kind: pod - state: list - selector: "glusterfs=deploy-heketi-pod" - register: heketi_pod - until: "heketi_pod.results.results[0]['items'] | count == 0" - delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" - when: openshift_storage_glusterfs_heketi_wipe - -- name: Wait for heketi pods to terminate - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - kind: pod - state: list - selector: "glusterfs=heketi-pod" - register: heketi_pod - until: "heketi_pod.results.results[0]['items'] | count == 0" - delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" - when: openshift_storage_glusterfs_heketi_wipe - -- name: Create heketi service account - oc_serviceaccount: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - name: heketi-service-account - state: present - when: openshift_storage_glusterfs_heketi_is_native - -- name: Add heketi service account to privileged SCC - oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" - resource_kind: scc - resource_name: privileged - state: present - when: openshift_storage_glusterfs_heketi_is_native - -- name: Allow heketi service account to view/edit pods - oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account" - resource_kind: role - resource_name: edit - state: present - when: openshift_storage_glusterfs_heketi_is_native - -- name: Check for existing deploy-heketi pod - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - state: list - kind: pod - selector: 
"glusterfs=deploy-heketi-pod,deploy-heketi=support" - register: heketi_pod - when: openshift_storage_glusterfs_heketi_is_native - -- name: Check if need to deploy deploy-heketi - set_fact: - openshift_storage_glusterfs_heketi_deploy_is_missing: False - when: - - "openshift_storage_glusterfs_heketi_is_native" - - "heketi_pod.results.results[0]['items'] | count > 0" - # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - -- name: Check for existing heketi pod - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - state: list - kind: pod - selector: "glusterfs=heketi-pod" - register: heketi_pod - when: openshift_storage_glusterfs_heketi_is_native - -- name: Check if need to deploy heketi - set_fact: - openshift_storage_glusterfs_heketi_is_missing: False +- include: glusterfs_config.yml when: - - "openshift_storage_glusterfs_heketi_is_native" - - "heketi_pod.results.results[0]['items'] | count > 0" - # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - -- include: heketi_deploy_part1.yml - when: - - openshift_storage_glusterfs_heketi_is_native - - openshift_storage_glusterfs_heketi_deploy_is_missing - - openshift_storage_glusterfs_heketi_is_missing - -- name: Determine heketi URL - oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" - state: list - kind: ep - selector: "glusterfs in (deploy-heketi-service, heketi-service)" - register: heketi_url - until: - - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" - - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" - delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" - when: - - openshift_storage_glusterfs_heketi_is_native - - openshift_storage_glusterfs_heketi_url is undefined - -- name: Set heketi URL - set_fact: - openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" - when: - - openshift_storage_glusterfs_heketi_is_native - - openshift_storage_glusterfs_heketi_url is undefined - -- name: Verify heketi service - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" - changed_when: False - -- name: Generate topology file - template: - src: "{{ openshift.common.examples_content_version }}/topology.json.j2" - dest: "{{ mktemp.stdout }}/topology.json" - when: - - openshift_storage_glusterfs_is_native - - openshift_storage_glusterfs_heketi_topology_load - -- name: Load heketi topology - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" - register: topology_load - failed_when: topology_load.rc != 0 or 'Unable' in topology_load.stdout - when: - - openshift_storage_glusterfs_is_native - - openshift_storage_glusterfs_heketi_topology_load - -- include: 
heketi_deploy_part2.yml - when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing + - g_glusterfs_hosts | default([]) | count > 0 - include: glusterfs_registry.yml - when: openshift.hosted.registry.storage.kind == 'glusterfs' + when: + - g_glusterfs_registry_hosts | default([]) | count > 0 + - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap" - name: Delete temp directory file: diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 index d72d085c9..605627ab5 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 @@ -4,7 +4,7 @@ metadata: name: glusterfs-registry-endpoints subsets: - addresses: -{% for node in groups.oo_glusterfs_to_config %} +{% for node in glusterfs_nodes %} - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} {% endfor %} ports: diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 index eb5b4544f..33d8f9b36 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 @@ -1,7 +1,7 @@ { "clusters": [ {%- set clusters = {} -%} -{%- for node in groups.oo_glusterfs_to_config -%} +{%- for node in glusterfs_nodes -%} {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%} {%- if cluster in clusters -%} {%- set _dummy = clusters[cluster].append(node) -%} |