41 files changed, 324 insertions, 64 deletions
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
index 140033a39..628d3a3f7 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/byo/hosts.byo.glusterfs.external.example
@@ -51,6 +51,6 @@ node2 node=True openshift_schedulable=True
 # must be bare (e.g. have no data, not be marked as LVM PVs), and will be
 # formatted.
 [glusterfs]
-node0 glusterfs_devices='[ "/dev/vdb" ]'
-node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
-node2 glusterfs_devices='[ "/dev/vdd" ]'
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example
index 96e997dc7..fd47cb9d5 100644
--- a/inventory/byo/hosts.byo.glusterfs.mixed.example
+++ b/inventory/byo/hosts.byo.glusterfs.mixed.example
@@ -28,7 +28,7 @@ openshift_deployment_type=origin
 # Specify that we want to use an external GlusterFS cluster and a native
 # heketi service
 openshift_storage_glusterfs_is_native=False
-openshift_storage_glusterfs_heketi_is_native=False
+openshift_storage_glusterfs_heketi_is_native=True
 # Specify that heketi will use SSH to communicate to the GlusterFS nodes and
 # the private key file it will use for authentication
 openshift_storage_glusterfs_heketi_executor=ssh
@@ -54,6 +54,6 @@ node2 node=True openshift_schedulable=True
 # must be bare (e.g. have no data, not be marked as LVM PVs), and will be
 # formatted.
 [glusterfs]
-node0 glusterfs_devices='[ "/dev/vdb" ]'
-node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
-node2 glusterfs_devices='[ "/dev/vdd" ]'
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
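Note: the mixed example above drives an external GlusterFS cluster with a native heketi pod via heketi's SSH executor. For reference, a minimal sketch of the related [OSEv3:vars] settings; the variable names and defaults come from roles/openshift_storage_glusterfs/defaults/main.yml later in this diff, while the keyfile path is purely illustrative:

    openshift_storage_glusterfs_heketi_executor=ssh
    openshift_storage_glusterfs_heketi_ssh_port=22
    openshift_storage_glusterfs_heketi_ssh_user=root
    openshift_storage_glusterfs_heketi_ssh_sudo=False
    openshift_storage_glusterfs_heketi_ssh_keyfile=/root/.ssh/id_rsa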
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
index dc847a5b2..a3e2570c9 100644
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ b/inventory/byo/hosts.byo.glusterfs.native.example
@@ -1,15 +1,12 @@
 # This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage.
+# with natively hosted, containerized GlusterFS storage for applications. It
+# will also automatically create a StorageClass for this purpose.
 #
 # This inventory may be used with the byo/config.yml playbook to deploy a new
-# cluster with GlusterFS storage, which will use that storage to create a
-# volume that will provide backend storage for a hosted Docker registry.
+# cluster with GlusterFS storage.
 #
 # This inventory may also be used with byo/openshift-glusterfs/config.yml to
-# deploy GlusterFS storage on an existing cluster. With this playbook, the
-# registry backend volume will be created but the administrator must then
-# either deploy a hosted registry or change an existing hosted registry to use
-# that volume.
+# deploy GlusterFS storage on an existing cluster.
 #
 # There are additional configuration parameters that can be specified to
 # control the deployment and state of a GlusterFS cluster. Please see the
@@ -25,8 +22,6 @@ glusterfs
 [OSEv3:vars]
 ansible_ssh_user=root
 openshift_deployment_type=origin
-# Specify that we want to use GlusterFS storage for a hosted registry
-openshift_hosted_registry_storage_kind=glusterfs
 
 [masters]
 master node=True storage=True master=True
@@ -35,9 +30,9 @@ master node=True storage=True master=True
 master node=True storage=True master=True openshift_schedulable=False
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "region=infra".
-node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node0 node=True openshift_schedulable=True
+node1 node=True openshift_schedulable=True
+node2 node=True openshift_schedulable=True
 
 # Specify the glusterfs group, which contains the nodes that will host
 # GlusterFS storage pods. At a minimum, each node must have a
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
new file mode 100644
index 000000000..999518abe
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example
@@ -0,0 +1,52 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for exclusive use
+# as storage for a natively hosted Docker registry.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs_registry
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master node=True storage=True master=True
+
+[nodes]
+master node=True storage=True master=True openshift_schedulable=False
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that is intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs_registry]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
new file mode 100644
index 000000000..1df79301a
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
@@ -0,0 +1,63 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for both general
+# application use and a natively hosted Docker registry. It will also create a
+# StorageClass for the general storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+glusterfs_registry
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master node=True storage=True master=True
+
+[nodes]
+master node=True storage=True master=True openshift_schedulable=False
+# It is recommended to not use a single cluster for both general and registry
+# storage, so two three-node clusters will be required.
+node0 node=True openshift_schedulable=True
+node1 node=True openshift_schedulable=True
+node2 node=True openshift_schedulable=True
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node3 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node4 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node5 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that is intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+
+[glusterfs_registry]
+node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
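Note: as their header comments state, each of these example inventories is meant to be copied, adapted, and fed to the byo playbooks. A minimal invocation sketch (the inventory path is whichever example you adapted):

    ansible-playbook -i inventory/byo/hosts.byo.glusterfs.storage-and-registry.example playbooks/byo/config.yml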
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index a1f541712..58b3a7835 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -325,6 +325,10 @@
     service: name=docker state=restarted
     failed_when: false
     when: not (container_engine | changed)
+    register: l_docker_restart_docker_in_pb_result
+    until: not l_docker_restart_docker_in_pb_result | failed
+    retries: 3
+    delay: 30
 
 - hosts: masters
   become: yes
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 8d64b0521..821a0f30e 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -35,4 +35,3 @@
     openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
     openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
     openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
-    openshift_use_dnsmasq: false
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 07db071ce..02b8a9d3c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -52,9 +52,13 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets
+      {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+    register: l_docker_upgrade_drain_result
+    until: not l_docker_upgrade_drain_result | failed
+    retries: 60
+    delay: 60
 
 - include: upgrade.yml
   when: l_docker_upgrade is defined and l_docker_upgrade | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
index 96c729d79..13313377e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -1,6 +1,10 @@
 ---
 - name: Restart docker
   service: name=docker state=restarted
+  register: l_docker_restart_docker_in_upgrade_result
+  until: not l_docker_restart_docker_in_upgrade_result | failed
+  retries: 3
+  delay: 30
 
 - name: Update docker facts
   openshift_facts:
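Note: a pattern that recurs throughout this change is that service restarts and drains now register their result and retry via until/retries/delay instead of failing on the first transient error. A minimal standalone sketch of the idiom (task name and register variable are illustrative):

    - name: Restart docker with retries
      service:
        name: docker
        state: restarted
      register: l_example_restart_result            # capture the task result
      until: not l_example_restart_result | failed  # re-run until the task succeeds
      retries: 3                                    # give up after 3 attempts
      delay: 30                                     # wait 30 seconds between attempts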
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 17f8fc6e9..35d000e49 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -32,7 +32,13 @@
 - debug: var=docker_image_count.stdout
   when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
 
-- service: name=docker state=stopped
+- service:
+    name: docker
+    state: stopped
+  register: l_pb_docker_upgrade_stop_result
+  until: not l_pb_docker_upgrade_stop_result | failed
+  retries: 3
+  delay: 30
 
 - name: Upgrade Docker
   package: name=docker{{ '-' + docker_version }} state=present
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index b7f089d99..2b2f10aee 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -296,6 +296,10 @@
     command: >
       {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
+    register: l_upgrade_control_plane_drain_result
+    until: not l_upgrade_control_plane_drain_result | failed
+    retries: 60
+    delay: 60
 
   roles:
   - lib_openshift
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 1d1e440d4..c93a5d89c 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,8 +26,12 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
+    register: l_upgrade_nodes_drain_result
+    until: not l_upgrade_nodes_drain_result | failed
+    retries: 60
+    delay: 60
 
   roles:
   - lib_openshift
diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml
index 63273cb78..ed2473a43 100644
--- a/playbooks/common/openshift-node/restart.yml
+++ b/playbooks/common/openshift-node/restart.yml
@@ -11,6 +11,10 @@
     service:
       name: docker
      state: restarted
+    register: l_docker_restart_docker_in_node_result
+    until: not l_docker_restart_docker_in_node_result | failed
+    retries: 3
+    delay: 30
 
   - name: Update docker facts
     openshift_facts:
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 477213f4e..569e00da2 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -37,4 +37,3 @@
    openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
    openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
    openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
-   openshift_use_dnsmasq: false
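Note: the drain tasks in the upgrade playbooks above now pass an explicit --config so the client always uses the master's admin kubeconfig, and they retry for up to an hour (60 retries x 60 seconds) to tolerate slow pod eviction. Rendered with typical values, the command looks roughly like this (hostname and config path are illustrative defaults):

    oadm drain node1.example.com --config=/etc/origin/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets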
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 25204c8b9..b3797ef96 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -42,6 +42,14 @@
     namespace: openshift-ansible-service-broker
     state: present
 
+- name: Set SA cluster-role
+  oc_adm_policy_user:
+    state: present
+    namespace: "openshift-ansible-service-broker"
+    resource_kind: cluster-role
+    resource_name: admin
+    user: "system:serviceaccount:openshift-ansible-service-broker:asb"
+
 - name: create ansible-service-broker service
   oc_service:
     name: asb
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml
index 53cecfcc3..67fc0065f 100644
--- a/roles/calico/handlers/main.yml
+++ b/roles/calico/handlers/main.yml
@@ -8,3 +8,7 @@
   systemd:
     name: "{{ openshift.docker.service_name }}"
     state: restarted
+  register: l_docker_restart_docker_in_calico_result
+  until: not l_docker_restart_docker_in_calico_result | failed
+  retries: 3
+  delay: 30
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index 0847c92bc..e861a2591 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -108,6 +108,10 @@
     name: "{{ openshift.docker.service_name }}"
     state: restarted
   when: docker_updated|changed
+  register: l_docker_restart_docker_in_contiv_result
+  until: not l_docker_restart_docker_in_contiv_result | failed
+  retries: 3
+  delay: 30
 
 - name: Netplugin | Enable Netplugin
   service:
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 3a4f4ba92..591367467 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -6,9 +6,8 @@
     state: restarted
   register: r_docker_restart_docker_result
   until: not r_docker_restart_docker_result | failed
-  retries: 1
+  retries: 3
   delay: 30
-  when: not docker_service_status_changed | default(false) | bool
 
 - name: restart udev
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index c82d8659a..5f536005d 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -123,9 +123,12 @@
     enabled: yes
     state: started
     daemon_reload: yes
-  register: start_result
+  register: r_docker_package_docker_start_result
+  until: not r_docker_package_docker_start_result | failed
+  retries: 3
+  delay: 30
 
 - set_fact:
-    docker_service_status_changed: start_result | changed
+    docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}"
 
 - meta: flush_handlers
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index d8c5ccfd3..57a84bc2c 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -46,6 +46,11 @@
     state: stopped
     daemon_reload: yes
   ignore_errors: True
+  register: r_docker_systemcontainer_docker_stop_result
+  until: not r_docker_systemcontainer_docker_stop_result | failed
+  retries: 3
+  delay: 30
+
 
 # Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
 # regexp: the line starts with or without #, followed by the string
@@ -160,9 +165,12 @@
     enabled: yes
     state: started
     daemon_reload: yes
-  register: start_result
+  register: r_docker_systemcontainer_docker_start_result
+  until: not r_docker_systemcontainer_docker_start_result | failed
+  retries: 3
+  delay: 30
 
 - set_fact:
-    docker_service_status_changed: start_result | changed
+    docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
 
 - meta: flush_handlers
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index c60c2115a..02f5a5f64 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -8,3 +8,7 @@
   systemd:
     name: "{{ openshift.docker.service_name }}"
     state: restarted
+  register: l_docker_restart_docker_in_flannel_result
+  until: not l_docker_restart_docker_in_flannel_result | failed
+  retries: 3
+  delay: 30
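Note: the new oc_adm_policy_user task in the ansible_service_broker change above binds the broker's service account to the admin cluster role; the hand-run CLI equivalent would be roughly:

    oc adm policy add-cluster-role-to-user admin system:serviceaccount:openshift-ansible-service-broker:asb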
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 551e21e72..1a4562776 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -53,7 +53,7 @@
 # RHEL and Centos image streams are mutually exclusive
 - name: Import RHEL streams
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ item }}
+    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
   when: openshift_examples_load_rhel | bool
   with_items:
   - "{{ rhel_image_streams }}"
@@ -63,7 +63,7 @@
 
 - name: Import Centos Image streams
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ centos_image_streams }}
+    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ centos_image_streams }}
   when: openshift_examples_load_centos | bool
   register: oex_import_centos_streams
   failed_when: "'already exists' not in oex_import_centos_streams.stderr and oex_import_centos_streams.rc != 0"
@@ -71,7 +71,7 @@
 
 - name: Import db templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ db_templates_base }}
+    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
   when: openshift_examples_load_db_templates | bool
   register: oex_import_db_templates
   failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0"
@@ -88,7 +88,7 @@
   - "{{ quickstarts_base }}/django.json"
 
 - name: Remove defunct quickstart templates from openshift namespace
-  command: "{{ openshift.common.client_binary }} -n openshift delete templates/{{ item }}"
+  command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
   with_items:
   - nodejs-example
   - cakephp-example
@@ -100,7 +100,7 @@
 
 - name: Import quickstart-templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ quickstarts_base }}
+    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
   when: openshift_examples_load_quickstarts | bool
   register: oex_import_quickstarts
   failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
@@ -114,7 +114,7 @@
   - "{{ xpaas_templates_base }}/sso70-basic.json"
 
 - name: Remove old xPaas templates from openshift namespace
-  command: "{{ openshift.common.client_binary }} -n openshift delete templates/{{ item }}"
+  command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
   with_items:
   - sso70-basic
   register: oex_delete_old_xpaas_templates
@@ -123,7 +123,7 @@
 
 - name: Import xPaas image streams
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_image_streams }}
+    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
   when: openshift_examples_load_xpaas | bool
   register: oex_import_xpaas_streams
   failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0"
@@ -131,7 +131,7 @@
 
 - name: Import xPaas templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_templates_base }}
+    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
   when: openshift_examples_load_xpaas | bool
   register: oex_import_xpaas_templates
   failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0"
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
index c504bfb80..c2954fde1 100644
--- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -35,7 +35,7 @@
   mount:
     state: mounted
     fstype: glusterfs
-    src: "{% if 'glusterfs_registry' in groups %}{{ groups.glusterfs_registry[0] }}{% else %}{{ groups.glusterfs[0] }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+    src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% else %}{% set node = groups.glusterfs[0] %}{% endif %}{% if 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
     name: "{{ mktemp.stdout }}"
 
 - name: Set registry volume permissions
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index 9673841bf..fc9272679 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -22,7 +22,7 @@ storage:
 {% endif %}
     bucket: {{ openshift_hosted_registry_storage_s3_bucket }}
     encrypt: {{ openshift_hosted_registry_storage_s3_encrypt | default(false) }}
-{% if openshift_hosted_registry_storage_s3_kmskeyid %}
+{% if openshift_hosted_registry_storage_s3_kmskeyid is defined %}
     keyid: {{ openshift_hosted_registry_storage_s3_kmskeyid }}
 {% endif %}
     secure: true
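Note: the registry_config.j2 fix above is subtle. In Jinja2, a bare truthiness test on a variable that was never set either raises an undefined-variable error or silently evaluates false, depending on the environment's undefined handler, whereas `is defined` is always safe. A minimal sketch (variable name illustrative):

    {% if some_var %}...{% endif %}             {# errors or misfires when some_var is unset #}
    {% if some_var is defined %}...{% endif %}  {# renders only when the variable exists #}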
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index a6bd12d4e..6b38da7f8 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -4,9 +4,14 @@
     name: openvswitch
     state: restarted
   when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+  register: l_openshift_node_stop_openvswitch_result
+  until: not l_openshift_node_stop_openvswitch_result | failed
+  retries: 3
+  delay: 30
   notify:
   - restart openvswitch pause
+
 - name: restart openvswitch pause
   pause: seconds=15
   when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
@@ -15,7 +20,13 @@
   systemd:
     name: "{{ openshift.common.service_type }}-node"
     state: restarted
-  when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
+  register: l_openshift_node_restart_node_result
+  until: not l_openshift_node_restart_node_result | failed
+  retries: 3
+  delay: 30
+  when:
+    - (not skip_node_svc_handlers | default(False) | bool)
+    - not (node_service_status_changed | default(false) | bool)
 
 - name: reload sysctl.conf
   command: /sbin/sysctl -p
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 573051504..879f6c207 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -118,8 +118,12 @@
     name: openvswitch.service
     enabled: yes
     state: started
+    daemon_reload: yes
   when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
   register: ovs_start_result
+  until: not ovs_start_result | failed
+  retries: 3
+  delay: 30
 
 - set_fact:
     ovs_service_status_changed: "{{ ovs_start_result | changed }}"
@@ -212,15 +216,27 @@
     state: started
   when: openshift.common.is_containerized | bool
 
+
 - name: Start and enable node
   systemd:
     name: "{{ openshift.common.service_type }}-node"
     enabled: yes
     state: started
+    daemon_reload: yes
   register: node_start_result
   until: not node_start_result | failed
   retries: 1
   delay: 30
+  ignore_errors: true
+
+- name: Dump logs from node service if it failed
+  command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+  when: node_start_result | failed
+
+- name: Abort if node failed to start
+  fail:
+    msg: Node failed to start; please inspect the logs and try again
+  when: node_start_result | failed
 
 - set_fact:
     node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 502f80434..4abe8bcaf 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -9,3 +9,7 @@
     name: "{{ openshift.docker.service_name }}"
     state: restarted
   when: not openshift_certificates_redeploy | default(false) | bool
+  register: l_docker_restart_docker_in_cert_result
+  until: not l_docker_restart_docker_in_cert_result | failed
+  retries: 3
+  delay: 30
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index 8b388cc6a..4e6229bfb 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -84,6 +84,11 @@ Including an example of how to use your role (for instance, with variables passe
       command: >
         {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
       delegate_to: "{{ groups.oo_first_master.0 }}"
+      register: l_docker_upgrade_drain_result
+      until: not l_docker_upgrade_drain_result | failed
+      retries: 60
+      delay: 60
+
     roles:
     - openshift_facts
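Note: the openshift_node change above introduces a useful start-or-diagnose pattern: let the start task tolerate failure, dump recent journal output for the unit, then abort explicitly. Generalized as a sketch (service name illustrative):

    - name: Start and enable myservice
      systemd:
        name: myservice
        enabled: yes
        state: started
      register: start_result
      until: not start_result | failed
      retries: 1
      delay: 30
      ignore_errors: true          # defer the failure so logs can be collected

    - name: Dump logs if the service failed
      command: journalctl --no-pager -n 100 -u myservice
      when: start_result | failed

    - name: Abort on failure
      fail:
        msg: myservice failed to start; inspect the logs above
      when: start_result | failed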
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
index cb51416d4..110dfe5ce 100644
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ b/roles/openshift_node_upgrade/handlers/main.yml
@@ -1,7 +1,13 @@
 ---
 - name: restart openvswitch
-  systemd: name=openvswitch state=restarted
+  systemd:
+    name: openvswitch
+    state: restarted
   when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+  register: l_openshift_node_upgrade_stop_openvswitch_result
+  until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
+  retries: 3
+  delay: 30
   notify:
   - restart openvswitch pause
 
@@ -10,5 +16,13 @@
   when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
 
 - name: restart node
-  systemd: name={{ openshift.common.service_type }}-node state=restarted
-  when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
+  systemd:
+    name: "{{ openshift.common.service_type }}-node"
+    state: restarted
+  register: l_openshift_node_upgrade_restart_node_result
+  until: not l_openshift_node_upgrade_restart_node_result | failed
+  retries: 3
+  delay: 30
+  when:
+    - (not skip_node_svc_handlers | default(False) | bool)
+    - not (node_service_status_changed | default(false) | bool)
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
index 416cf605a..ebe87d6fd 100644
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
@@ -26,7 +26,13 @@
 - debug: var=docker_image_count.stdout
   when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
 
-- service: name=docker state=stopped
+- service:
+    name: docker
+    state: stopped
+  register: l_openshift_node_upgrade_docker_stop_result
+  until: not l_openshift_node_upgrade_docker_stop_result | failed
+  retries: 3
+  delay: 30
 
 - name: Upgrade Docker
   package: name=docker{{ '-' + docker_version }} state=present
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 6947223af..f228b6e08 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -19,7 +19,7 @@
     state: started
   register: docker_start_result
   until: not docker_start_result | failed
-  retries: 1
+  retries: 3
   delay: 30
 
 - name: Update docker facts
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 4d1a38e61..686857d94 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -23,7 +23,7 @@
   oc_project:
     state: present
     name: "kube-service-catalog"
-#    node_selector: "{{ openshift_service_catalog_nodeselector | default(null) }}"
+    node_selector: ""
 
 - name: Make kube-service-catalog project network global
   command: >
diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
index 55e68dc00..d5291a99a 100644
--- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml
+++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
@@ -147,6 +147,12 @@
       value: [/etc/origin/master/openshift-ansible-catalog-console.js]
     - key: kubernetesMasterConfig.apiServerArguments.runtime-config
       value: [apis/settings.k8s.io/v1alpha1=true]
+    - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
+      value: DefaultAdmissionConfig
+    - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion
+      value: v1
+    - key: admissionConfig.pluginConfig.PodPreset.configuration.disable
+      value: false
   register: yedit_output
 
 #restart master serially here
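Note: the wire_aggregator keys above end up as an admission plugin stanza in master-config.yaml; reassembled, the resulting fragment should look roughly like:

    admissionConfig:
      pluginConfig:
        PodPreset:
          configuration:
            kind: DefaultAdmissionConfig
            apiVersion: v1
            disable: false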
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 7f4738f1c..b367e7daf 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -64,7 +64,7 @@ their configuration as GlusterFS nodes:
 |--------------------|---------------------------|-----------------------------------------|
 | glusterfs_cluster  | 1                         | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
 | glusterfs_hostname | openshift.node.nodename   | A hostname (or IP address) that will be used for internal GlusterFS communication
-| glusterfs_ip       | openshift.common.ip       | An IP address that will be used by pods to communicate with the GlusterFS node
+| glusterfs_ip       | openshift.common.ip       | An IP address that will be used by pods to communicate with the GlusterFS node. **NOTE:** Required for external GlusterFS nodes
 | glusterfs_zone     | 1                         | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volume's bricks as evenly as possible across all zones
 
 Role Variables
@@ -76,7 +76,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | Name                                             | Default value           | Description                             |
 |--------------------------------------------------|-------------------------|-----------------------------------------|
 | openshift_storage_glusterfs_timeout              | 300                     | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace            | 'default'               | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_namespace            | 'glusterfs'             | Namespace in which to create GlusterFS resources
 | openshift_storage_glusterfs_is_native            | True                    | GlusterFS should be containerized
 | openshift_storage_glusterfs_name                 | 'storage'               | A name to identify the GlusterFS cluster, which will be used in resource names
 | openshift_storage_glusterfs_nodeselector         | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
@@ -85,6 +85,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | openshift_storage_glusterfs_version              | 'latest'                | Container image version to use for GlusterFS pods
 | openshift_storage_glusterfs_wipe                 | False                   | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
 | openshift_storage_glusterfs_heketi_is_native     | True                    | heketi should be containerized
+| openshift_storage_glusterfs_heketi_cli           | 'heketi-cli'            | Command/path to invoke the heketi-cli tool. **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
 | openshift_storage_glusterfs_heketi_image         | 'heketi/heketi'         | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
 | openshift_storage_glusterfs_heketi_version       | 'latest'                | Container image version to use for heketi pods
 | openshift_storage_glusterfs_heketi_admin_key     | auto-generated          | String to use as secret key for performing heketi commands as admin
@@ -108,7 +109,7 @@ are an exception:
 | Name                                                   | Default value         | Description                             |
 |--------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace         | registry namespace    | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_namespace         | registry namespace    | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
 | openshift_storage_glusterfs_registry_name              | 'registry'            | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
 | openshift_storage_glusterfs_registry_storageclass      | False                 | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
 | openshift_storage_glusterfs_registry_heketi_admin_key  | auto-generated        | Separate from the above
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 88e122f55..a846889ca 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -1,6 +1,6 @@
 ---
 openshift_storage_glusterfs_timeout: 300
-openshift_storage_glusterfs_namespace: 'default'
+openshift_storage_glusterfs_namespace: 'glusterfs'
 openshift_storage_glusterfs_is_native: True
 openshift_storage_glusterfs_name: 'storage'
 openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
@@ -8,9 +8,10 @@ openshift_storage_glusterfs_storageclass: True
 openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
 openshift_storage_glusterfs_version: 'latest'
 openshift_storage_glusterfs_wipe: False
-openshift_storage_glusterfs_heketi_is_native: True
+openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is_native }}"
 openshift_storage_glusterfs_heketi_is_missing: True
 openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_cli: 'heketi-cli'
 openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
 openshift_storage_glusterfs_heketi_version: 'latest'
 openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}"
@@ -26,7 +27,7 @@ openshift_storage_glusterfs_heketi_ssh_sudo: False
 openshift_storage_glusterfs_heketi_ssh_keyfile: '/dev/null'
 
 openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
-openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
 openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
 openshift_storage_glusterfs_registry_name: 'registry'
 openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
@@ -34,9 +35,10 @@ openshift_storage_glusterfs_registry_storageclass: False
 openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
 openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
 openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
-openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
 openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
 openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_cli: "{{ openshift_storage_glusterfs_heketi_cli }}"
 openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
 openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
 openshift_storage_glusterfs_registry_heketi_admin_key: "{{ omit }}"
@@ -44,9 +46,9 @@ openshift_storage_glusterfs_registry_heketi_user_key: "{{ omit }}"
 openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
 openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
 openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
-openshift_storage_glusterfs_registry_heketi_port: 8080
-openshift_storage_glusterfs_registry_heketi_executor: 'kubernetes'
-openshift_storage_glusterfs_registry_heketi_ssh_port: 22
-openshift_storage_glusterfs_registry_heketi_ssh_user: 'root'
-openshift_storage_glusterfs_registry_heketi_ssh_sudo: False
-openshift_storage_glusterfs_registry_heketi_ssh_keyfile: '/dev/null'
+openshift_storage_glusterfs_registry_heketi_port: "{{ openshift_storage_glusterfs_heketi_port }}"
+openshift_storage_glusterfs_registry_heketi_executor: "{{ openshift_storage_glusterfs_heketi_executor }}"
+openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glusterfs_heketi_ssh_port }}"
+openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
+openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"
+openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile }}"
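Note: tying the README row and the new default together, openshift_storage_glusterfs_heketi_cli only matters for non-native heketi, where the installer shells out to a local heketi-cli binary. A hypothetical inventory override for a binary outside $PATH:

    openshift_storage_glusterfs_heketi_is_native=False
    openshift_storage_glusterfs_heketi_cli=/usr/local/bin/heketi-cli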
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 99ad029da..600d8f676 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -1,4 +1,16 @@
 ---
+- name: Make sure heketi-client is installed
+  package: name=heketi-client state=present
+  when:
+  - not openshift.common.is_atomic | bool
+  - not glusterfs_heketi_is_native | bool
+
+- name: Verify heketi-cli is installed
+  shell: "command -v {{ glusterfs_heketi_cli }} >/dev/null 2>&1 || { echo >&2 'ERROR: Make sure heketi-cli is available, then re-run the installer'; exit 1; }"
+  changed_when: False
+  when:
+  - not glusterfs_heketi_is_native | bool
+
 - name: Verify target namespace exists
   oc_project:
     state: present
@@ -173,7 +185,7 @@
 
 - name: Set heketi-cli command
   set_fact:
-    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
 
 - name: Verify heketi service
   command: "{{ glusterfs_heketi_client }} cluster list"
@@ -203,6 +215,7 @@
     data: "{{ glusterfs_heketi_admin_key }}"
   when:
   - glusterfs_storageclass
+  - glusterfs_heketi_admin_key is defined
 
 - name: Get heketi route
   oc_obj:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 76611d936..b54a8e36c 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -12,6 +12,7 @@
     glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
     glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
     glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+    glusterfs_heketi_cli: "{{ openshift_storage_glusterfs_heketi_cli }}"
     glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
     glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
     glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 280088fe4..0b4d1c82b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -12,6 +12,7 @@
     glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}"
     glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}"
     glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}"
+    glusterfs_heketi_cli: "{{ openshift_storage_glusterfs_registry_heketi_cli }}"
     glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}"
     glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}"
     glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 5ef4b5c83..37d3e6ba2 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -126,7 +126,7 @@
 
 - name: Set heketi-cli command
   set_fact:
-    glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+    glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
 
 - name: Verify heketi service
   command: "{{ glusterfs_heketi_client }} cluster list"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 2ec9a9e9a..095fb780f 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -7,5 +7,7 @@ provisioner: kubernetes.io/glusterfs
 parameters:
   resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
   restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
   secretNamespace: "{{ glusterfs_namespace }}"
   secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
index 3aac68e2f..d6c28f6dd 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
@@ -17,10 +17,20 @@
               "node": {
                 "hostnames": {
                   "manage": [
-                    "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.node.nodename) }}"
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+                    "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+                    "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+                    "{{ node }}"
+{%- endif -%}
                   ],
                   "storage": [
-                    "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+                    "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+                    "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
                   ]
                 },
                 "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
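Note: for a node such as node0.local from the external example, with glusterfs_ip set and no glusterfs_hostname or OpenShift facts, the reworked template above should render a topology entry roughly like this (zone falls back to 1):

    "node": {
      "hostnames": {
        "manage": [ "node0.local" ],
        "storage": [ "172.0.0.10" ]
      },
      "zone": 1
    }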