43 files changed, 339 insertions, 128 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 6300e1179..bdfa06c4a 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.34.0 ./
+3.9.0-0.35.0 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index ab00e9d0f..23f43dcd5 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
 Name:    openshift-ansible
 Version: 3.9.0
-Release: 0.34.0%{?dist}
+Release: 0.35.0%{?dist}
 Summary: Openshift and Atomic Enterprise Ansible
 License: ASL 2.0
 URL:     https://github.com/openshift/openshift-ansible
@@ -200,6 +200,26 @@ Atomic OpenShift Utilities includes
 %changelog
+* Wed Jan 31 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.35.0
+- add glusterblock support for ansible (m.judeikis@gmail.com)
+- Add a bare minimum localhost hosts file (sdodson@redhat.com)
+- copy etcd client certificates for nuage openshift monitor
+  (siva_teja.areti@nokia.com)
+- fix hostvars parameter name (tzumainn@redhat.com)
+- remove mountpoint parameter (tzumainn@redhat.com)
+- flake cleanup (tzumainn@redhat.com)
+- code simplification and lint cleanup (tzumainn@redhat.com)
+- Symlink kubectl to oc instead of openshift (mfojtik@redhat.com)
+- Rework provisioners vars to support different prefix/version for Origin/OSE
+  (vrutkovs@redhat.com)
+- add cinder mountpoint to inventory (tzumainn@redhat.com)
+- allow setting of kibana env vars (jcantril@redhat.com)
+- No longer compare with legacy hosted var (ewolinet@redhat.com)
+- Preserving ES dc storage type unless overridden by inventory variable
+  (ewolinet@redhat.com)
+- Fix: e2e tests failing due to :1936/metrics unaccessible.
+  (jmencak@redhat.com)
+
 * Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.34.0
 - docker_creds: decode docker_config for py3 only if its a string
   (vrutkovs@redhat.com)
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
deleted file mode 100644
index faeb332ad..000000000
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: masters[0]
-  roles:
-  - role: openshift_logging
-    openshift_hosted_logging_cleanup: no
-
-- name: Update master-config for publicLoggingURL
-  hosts: masters:!masters[0]
-  pre_tasks:
-  - set_fact:
-      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
-  tasks:
-  - import_role:
-      name: openshift_logging
-      tasks_from: update_master_config
-    when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index a10fd4bee..c27118f6f 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -72,8 +72,6 @@
 # support for optional hooks to be defined.
 - name: Upgrade master
   hosts: oo_masters_to_config
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
   roles:
   - openshift_facts
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index fe1fdefff..c8a42322d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -64,6 +64,7 @@
 - import_playbook: ../upgrade_control_plane.yml
   vars:
     openshift_release: '3.8'
+    openshift_pkg_version: ''
   when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')

 ## 3.8 upgrade complete we should now be able to upgrade to 3.9
diff --git a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
index 62fe0dd60..c59ebcead 100644
--- a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
+++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
@@ -3,4 +3,6 @@
   hosts: oo_first_master
   roles:
   - role: openshift_default_storage_class
-    when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
+    when:
+    - openshift_cloudprovider_kind is defined
+    - openshift_cloudprovider_kind in ['aws','gce','openstack','vsphere']
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index d6b26647c..07aa8bfde 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -24,6 +24,7 @@
     - import_role:
         name: openshift_logging
         tasks_from: update_master_config
+      when: not openshift.common.version_gte_3_9

 - name: Logging Install Checkpoint End
   hosts: all
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 85be0e600..ca514ed26 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -16,7 +16,6 @@
   vars:
     cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
     etcd_urls: "{{ openshift.master.etcd_urls }}"
-    openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
     omc_cluster_hosts: "{{ groups.oo_masters | join(' ') }}"
   roles:
   - role: openshift_project_request_template
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 153ea9993..d2fc2eed8 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -78,7 +78,6 @@
       console_url: "{{ openshift_master_console_url | default(None) }}"
       console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
       public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
-      ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
       master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"

 - name: Inspect state of first master config settings
@@ -166,7 +165,6 @@
   hosts: oo_masters_to_config
   any_errors_fatal: true
   vars:
-    openshift_master_ha: "{{ openshift.master.ha }}"
     openshift_master_count: "{{ openshift.master.master_count }}"
     openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
     openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
@@ -186,6 +184,7 @@
   - role: openshift_buildoverrides
   - role: nickhammond.logrotate
   - role: openshift_master
+    openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
     openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
     r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
     r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
index 5cb284935..17d90533c 100644
--- a/playbooks/openshift-master/private/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -3,16 +3,13 @@
 - name: Restart masters
   hosts: oo_masters_to_config
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
-  handlers:
-  - import_tasks: ../../../roles/openshift_master/handlers/main.yml
   roles:
   - openshift_facts
   post_tasks:
   - include_tasks: tasks/restart_hosts.yml
     when: openshift_rolling_restart_mode | default('services') == 'system'
-
-  - include_tasks: tasks/restart_services.yml
+  - import_role:
+      name: openshift_master
+      tasks_from: restart.yml
     when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 007b23ea3..20ebf70d3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -8,7 +8,6 @@
   - openshift_facts:
       role: master
       local_facts:
-        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
         master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
   - name: Update master count
     modify_yaml:
diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml
deleted file mode 100644
index cf2c282e3..000000000
--- a/playbooks/openshift-master/private/tasks/restart_services.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- import_role:
-    name: openshift_master
-    tasks_from: restart.yml
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 1e237e3f0..889ea77b1 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -25,6 +25,7 @@
     import_role:
       name: openshift_metrics
       tasks_from: update_master_config.yaml
+    when: not openshift.common.version_gte_3_9

 - name: Metrics Install Checkpoint End
   hosts: all
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
index 76e658eb7..d5a8c3e24 100755
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -15,18 +15,10 @@ import json
 import shade


-def build_inventory():
-    '''Build the dynamic inventory.'''
-    cloud = shade.openstack_cloud()
-
+def base_openshift_inventory(cluster_hosts):
+    '''Set the base openshift inventory.'''
     inventory = {}

-    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
-    # environment variable.
-    cluster_hosts = [
-        server for server in cloud.list_servers()
-        if 'metadata' in server and 'clusterid' in server.metadata]
-
     masters = [server.name for server in cluster_hosts
                if server.metadata['host-type'] == 'master']
@@ -67,6 +59,34 @@ def build_inventory():
     inventory['dns'] = {'hosts': dns}
     inventory['lb'] = {'hosts': load_balancers}

+    return inventory
+
+
+def get_docker_storage_mountpoints(volumes):
+    '''Check volumes to see if they're being used for docker storage'''
+    docker_storage_mountpoints = {}
+    for volume in volumes:
+        if volume.metadata.get('purpose') == "openshift_docker_storage":
+            for attachment in volume.attachments:
+                if attachment.server_id in docker_storage_mountpoints:
+                    docker_storage_mountpoints[attachment.server_id].append(attachment.device)
+                else:
+                    docker_storage_mountpoints[attachment.server_id] = [attachment.device]
+    return docker_storage_mountpoints
+
+
+def build_inventory():
+    '''Build the dynamic inventory.'''
+    cloud = shade.openstack_cloud()
+
+    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+    # environment variable.
+    cluster_hosts = [
+        server for server in cloud.list_servers()
+        if 'metadata' in server and 'clusterid' in server.metadata]
+
+    inventory = base_openshift_inventory(cluster_hosts)
+
     for server in cluster_hosts:
         if 'group' in server.metadata:
             group = server.metadata.group
@@ -76,6 +96,9 @@ def build_inventory():

     inventory['_meta'] = {'hostvars': {}}

+    # cinder volumes used for docker storage
+    docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes())
+
     for server in cluster_hosts:
         ssh_ip_address = server.public_v4 or server.private_v4
         hostvars = {
@@ -111,6 +134,11 @@ def build_inventory():
         if node_labels:
             hostvars['openshift_node_labels'] = node_labels

+        # check for attached docker storage volumes
+        if 'os-extended-volumes:volumes_attached' in server:
+            if server.id in docker_storage_mountpoints:
+                hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id])
+
         inventory['_meta']['hostvars'][server.name] = hostvars

     return inventory
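The refactored inventory splits host grouping out into `base_openshift_inventory()` and adds `get_docker_storage_mountpoints()`, which groups Cinder volume attachments by server ID so each host can advertise its docker-storage devices. A standalone sketch of that grouping, using namedtuple stand-ins for the shade objects (the `Volume`/`Attachment` types below are assumptions for illustration, not the shade API):

```python
from collections import namedtuple

# Illustrative stand-ins for the shade/munch objects the real inventory receives.
Attachment = namedtuple('Attachment', ['server_id', 'device'])
Volume = namedtuple('Volume', ['metadata', 'attachments'])


def get_docker_storage_mountpoints(volumes):
    '''Group docker-storage volume attachments by server ID.'''
    mountpoints = {}
    for volume in volumes:
        if volume.metadata.get('purpose') == 'openshift_docker_storage':
            for attachment in volume.attachments:
                mountpoints.setdefault(attachment.server_id, []).append(attachment.device)
    return mountpoints


volumes = [
    Volume({'purpose': 'openshift_docker_storage'},
           [Attachment('node-1', '/dev/vdb')]),
    Volume({'purpose': 'openshift_docker_storage'},
           [Attachment('node-1', '/dev/vdc'), Attachment('node-2', '/dev/vdb')]),
    Volume({}, [Attachment('node-3', '/dev/vdb')]),  # not tagged for docker storage: ignored
]

mountpoints = get_docker_storage_mountpoints(volumes)
# hostvars carry the devices as one space-separated string, as in build_inventory()
print({server: ' '.join(devs) for server, devs in mountpoints.items()})
# -> {'node-1': '/dev/vdb /dev/vdc', 'node-2': '/dev/vdb'}
```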
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 7b55dda56..c0411d641 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,9 +1,7 @@
 ---
 - name: restart master api
   systemd: name={{ openshift_service_type }}-master-api state=restarted
-  when: >
-    (openshift_master_ha | bool) and
-    (not master_api_service_status_changed | default(false))
+  when: (not master_api_service_status_changed | default(false))

 # TODO: need to fix up ignore_errors here
 # We retry the controllers because the API may not be 100% initialized yet.
@@ -13,7 +11,5 @@
   delay: 5
   register: result
   until: result.rc == 0
-  when: >
-    (openshift_master_ha | bool) and
-    (not master_controllers_service_status_changed | default(false))
+  when: (not master_controllers_service_status_changed | default(false))
   ignore_errors: yes
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 178e0849c..c8d385db5 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -44,6 +44,8 @@ openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"

 openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"

+openshift_aws_elb_cert_arn: ''
+
 openshift_aws_elb_dict:
   master:
     external:
@@ -65,7 +67,7 @@ openshift_aws_elb_dict:
       load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
       instance_protocol: ssl
       instance_port: "{{ openshift_master_api_port | default(8443) }}"
-      ssl_certificate_id: ''
+      ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
     name: "{{ openshift_aws_elb_basename }}-master-external"
     tags: "{{ openshift_aws_kube_tags }}"
   internal:
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
index 014c06641..7ca122fc9 100644
--- a/roles/openshift_default_storage_class/defaults/main.yml
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -19,6 +19,12 @@ openshift_storageclass_defaults:
     parameters:
       fstype: xfs

+  vsphere:
+    provisioner: vsphere-volume
+    name: standard
+    parameters:
+      datastore: "{{ openshift_cloudprovider_vsphere_datacenter }}"
+
 openshift_storageclass_default: "true"
 openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
 openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index a192bd67e..c438236a4 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -58,6 +58,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
 - `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
 - `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
 - `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect.
+- `openshift_logging_kibana_env_vars`: A map of environment variables to add to the Kibana deployment config (e.g. {"ELASTICSEARCH_REQUESTTIMEOUT":"30000"})
 - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
 - `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'.
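The new `openshift_logging_kibana_env_vars` map documented above flows through `kibana_env_vars` into the deployment-config template (see the `kibana.j2` hunk further down). A small Python sketch of the shape of that translation; the helper name is hypothetical, only the inventory variable comes from the role:

```python
def kibana_env_entries(env_vars):
    '''Translate the openshift_logging_kibana_env_vars map into the
    name/value entries a container spec's `env` list expects.'''
    return [{'name': key, 'value': str(value)} for key, value in env_vars.items()]


# Example inventory value from the README above:
print(kibana_env_entries({'ELASTICSEARCH_REQUESTTIMEOUT': '30000'}))
# -> [{'name': 'ELASTICSEARCH_REQUESTTIMEOUT', 'value': '30000'}]
```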
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index ced7397b5..6be47b1f8 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -140,4 +140,6 @@
     console_config_edits:
     - key: clusterInfo#loggingPublicURL
       value: ""
-  when: openshift_web_console_install | default(true) | bool
+  when:
+  - openshift_web_console_install | default(true) | bool
+  - openshift.common.version_gte_3_9
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index e4883bfa0..c905502ac 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -71,10 +71,17 @@
 - set_fact: openshift_logging_es_pvc_prefix="logging-es"
   when: openshift_logging_es_pvc_prefix == ""

+# Using this module for setting this fact because otherwise we were getting a value of ""
+# trying to use default() in the set_fact after this, which caused us to not correctly
+# evaluate openshift_logging_elasticsearch_storage_type
+- conditional_set_fact:
+    facts: "{{ hostvars[inventory_hostname] }}"
+    vars:
+      elasticsearch_storage_type: openshift_logging_elasticsearch_storage_type
+
 - set_fact:
-    elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}"
+    default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir' }}"

-# We don't allow scaling down of ES nodes currently
 - include_role:
     name: openshift_logging_elasticsearch
   vars:
@@ -85,7 +92,8 @@
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"

-    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}"
+    openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
     openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
     openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
@@ -112,7 +120,7 @@
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"

-    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
     openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
@@ -133,7 +141,7 @@
   when: openshift_logging_es_ops_pvc_prefix == ""

 - set_fact:
-    elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir') }}"
+    default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir' }}"
   when:
   - openshift_logging_use_ops | bool
@@ -147,7 +155,8 @@
     openshift_logging_elasticsearch_ops_deployment: true
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"

-    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}"
+    openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
     openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
@@ -189,7 +198,7 @@
     openshift_logging_elasticsearch_ops_deployment: true
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"

-    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
     openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
@@ -326,4 +335,6 @@
     console_config_edits:
     - key: clusterInfo#loggingPublicURL
       value: "https://{{ openshift_logging_kibana_hostname }}"
-  when: openshift_web_console_install | default(true) | bool
+  when:
+  - openshift_web_console_install | default(true) | bool
+  - openshift.common.version_gte_3_9
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index 899193838..b69cbacae 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -18,6 +18,9 @@ openshift_logging_kibana_es_port: 9200
 openshift_logging_kibana_replicas: 1
 openshift_logging_kibana_edge_term_policy: Redirect

+# map of env vars to add to the kibana deploymentconfig
+openshift_logging_kibana_env_vars: {}
+
 # this is used to determine if this is an operations deployment or a non-ops deployment
 # simply used for naming purposes
 openshift_logging_kibana_ops_deployment: false
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 3c3bd902e..c67235c62 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -251,6 +251,7 @@
     kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
     kibana_replicas: "{{ openshift_logging_kibana_replicas | default(1) }}"
     kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
+    kibana_env_vars: "{{ openshift_logging_kibana_env_vars | default({}) }}"

 - name: Set Kibana DC
   oc_obj:
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 57d216373..ed05b8458 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -70,6 +70,10 @@ spec:
               resourceFieldRef:
                 containerName: kibana
                 resource: limits.memory
+{% for key, value in kibana_env_vars.items() %}
+          - name: "{{ key }}"
+            value: "{{ value }}"
+{% endfor %}
           volumeMounts:
           - name: kibana
             mountPath: /etc/kibana/keys
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index b12a6b346..41f2ee2a5 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -227,7 +227,7 @@
 - pause:
     seconds: 15
   when:
-  - openshift.master.ha | bool
+  - openshift_master_ha | bool

 - name: Start and enable master api all masters
   systemd:
diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml
index 715347101..f7697067a 100644
--- a/roles/openshift_master/tasks/restart.yml
+++ b/roles/openshift_master/tasks/restart.yml
@@ -3,7 +3,6 @@
   service:
     name: "{{ openshift_service_type }}-master-api"
     state: restarted
-  when: openshift_master_ha | bool
 - name: Wait for master API to come back online
   wait_for:
     host: "{{ openshift.common.hostname }}"
@@ -11,12 +10,10 @@
     delay: 10
     port: "{{ openshift.master.api_port }}"
     timeout: 600
-  when: openshift_master_ha | bool

-- name: Restart master controllers
-  service:
-    name: "{{ openshift_service_type }}-master-controllers"
-    state: restarted
-  # Ignore errors since it is possible that type != simple for
-  # pre-3.1.1 installations.
-  ignore_errors: true
-  when: openshift_master_ha | bool
+# We retry the controllers because the API may not be 100% initialized yet.
+- name: restart master controllers
+  command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
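The controller restart now leans on Ansible's `retries`/`delay`/`until` loop instead of `ignore_errors`, since the controllers may refuse to start until the API is fully up. A minimal Python equivalent of that retry contract (a sketch, assuming the same unit-name pattern as the task above):

```python
import subprocess
import time


def restart_with_retries(unit, retries=3, delay=5):
    '''Retry `systemctl restart` until it exits 0, mirroring the
    retries/delay/until settings in the task above.'''
    for attempt in range(1, retries + 1):
        result = subprocess.run(['systemctl', 'restart', unit])
        if result.returncode == 0:
            return
        if attempt < retries:
            time.sleep(delay)
    raise RuntimeError('%s failed to restart after %d attempts' % (unit, retries))


# e.g. restart_with_retries('origin-master-controllers')
```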
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 6b6c21d71..f05c8968d 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -79,7 +79,9 @@
     console_config_edits:
     - key: clusterInfo#metricsPublicURL
       value: "https://{{ openshift_metrics_hawkular_hostname }}/hawkular/metrics"
-  when: openshift_web_console_install | default(true) | bool
+  when:
+  - openshift_web_console_install | default(true) | bool
+  - openshift.common.version_gte_3_9

 - command: >
     {{ openshift_client_binary }}
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 1664e9975..ed849916d 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -28,4 +28,6 @@
     console_config_edits:
     - key: clusterInfo#metricsPublicURL
       value: ""
-  when: openshift_web_console_install | default(true) | bool
+  when:
+  - openshift_web_console_install | default(true) | bool
+  - openshift.common.version_gte_3_9
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0fe4c2035..9f887891b 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -137,6 +137,7 @@ default_r_openshift_node_image_prep_packages:
 - yum-utils
 # gluster
 - glusterfs-fuse
+- device-mapper-multipath
 # nfs
 - nfs-utils
 - flannel
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index a8048c42f..72415f9a6 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -1,6 +1,32 @@
 ---
 - name: Install iSCSI storage plugin dependencies
-  package: name=iscsi-initiator-utils state=present
+  package:
+    name: "{{ item }}"
+    state: present
   when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
+  with_items:
+  - iscsi-initiator-utils
+  - device-mapper-multipath
+
+- name: start and enable services
+  systemd:
+    name: "{{ item }}"
+    state: started
+    enabled: True
+  with_items:
+  - multipathd
+  - rpcbind
+
+- name: Template multipath configuration
+  template:
+    dest: "/etc/multipath.conf"
+    src: multipath.conf.j2
+    backup: true
+  when: not openshift_is_atomic | bool
+
+# enable multipath
+- name: Enable multipath
+  command: "mpathconf --enable"
+  when: not openshift_is_atomic | bool
diff --git a/roles/openshift_node/templates/multipath.conf.j2 b/roles/openshift_node/templates/multipath.conf.j2
new file mode 100644
index 000000000..8a0abc2c1
--- /dev/null
+++ b/roles/openshift_node/templates/multipath.conf.j2
@@ -0,0 +1,15 @@
+# LIO iSCSI
+# TODO: Add env variables for tweaking
+devices {
+  device {
+    vendor "LIO-ORG"
+    user_friendly_names "yes"
+    path_grouping_policy "failover"
+    path_selector "round-robin 0"
+    failback immediate
+    path_checker "tur"
+    prio "const"
+    no_path_retry 120
+    rr_weight "uniform"
+  }
+}
diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
index 32c6b5838..9015c561f 100644
--- a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
+++ b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
@@ -1,4 +1,8 @@
+{% if docker_storage_mountpoints is defined %}
+DEVS="{{ docker_storage_mountpoints }}"
+{% else %}
 DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}"
+{% endif %}
 VG="{{ openshift_openstack_container_storage_setup.docker_vg }}"
 DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}"
 EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ openshift_openstack_container_storage_setup.docker_dm_basesize }}"
diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
index 1bf366bdc..917347073 100644
--- a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
+++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
@@ -1,4 +1,8 @@
+{% if docker_storage_mountpoints is defined %}
+DEVS="{{ docker_storage_mountpoints }}"
+{% else %}
 DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}"
+{% endif %}
 VG="{{ openshift_openstack_container_storage_setup.docker_vg }}"
 DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}"
 STORAGE_DRIVER=overlay2
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 8e7c6288a..1d3173022 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -418,6 +418,10 @@ resources:
         protocol: tcp
         port_range_min: 443
         port_range_max: 443
+      - direction: ingress
+        protocol: tcp
+        port_range_min: 1936
+        port_range_max: 1936

   cns-secgrp:
     type: OS::Neutron::SecurityGroup
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index 29b09f3c9..9aeecfa74 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -261,11 +261,12 @@
     properties:
       size: { get_param: volume_size }
       availability_zone: { get_param: availability_zone }
+      metadata:
+        purpose: openshift_docker_storage

   volume_attachment:
     type: OS::Cinder::VolumeAttachment
     properties:
       volume_id: { get_resource: cinder_volume }
       instance_uuid: { get_resource: server }
-      mountpoint: /dev/sdb
 {% endif %}
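With the `purpose: openshift_docker_storage` metadata tag above, the dynamic inventory can hand each node its real attachment devices, and the docker-storage-setup templates fall back to the static `docker_dev` only when no tagged volume was found. A tiny Python sketch of that fallback (the dict shape is an assumption mirroring `openshift_openstack_container_storage_setup`):

```python
def docker_storage_devs(docker_storage_mountpoints, container_storage_setup):
    '''Mirror the template logic: prefer the devices discovered by the
    dynamic inventory, else the statically configured docker_dev.'''
    if docker_storage_mountpoints is not None:
        return docker_storage_mountpoints          # e.g. "/dev/vdb /dev/vdc"
    return container_storage_setup['docker_dev']   # e.g. "/dev/sdb"


print(docker_storage_devs('/dev/vdb', {'docker_dev': '/dev/sdb'}))  # -> /dev/vdb
print(docker_storage_devs(None, {'docker_dev': '/dev/sdb'}))        # -> /dev/sdb
```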
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index f7bd58db3..70a89b0ba 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -73,49 +73,51 @@ Role Variables
 This role has the following variables that control the integration of a
 GlusterFS cluster into a new or existing OpenShift cluster:

-| Name | Default value | Description |
-|--------------------------------------------------|-------------------------|-----------------------------------------|
-| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
-| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
-| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
-| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
-| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
-| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
-| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
-| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
-| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
-| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
-| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
-| openshift_storage_glusterfs_block_host_vol_create| True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
-| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
-| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
-| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
-| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
-| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster=s3 pod
-| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
-| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
-| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
-| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
-| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
-| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
-| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
-| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
-| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
-| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
-| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
-| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
-| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
+| Name | Default value | Description |
+|---------------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
+| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
+| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
+| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
+| openshift_storage_glusterfs_block_host_vol_create | True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
+| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
+| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_block_storageclass | False | Automatically create a StorageClass for each Gluster Block cluster
+| openshift_storage_glusterfs_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
+| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
+| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod
+| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
+| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
+| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
+| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
+| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
+| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
+| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
+| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
 | openshift_storage_glusterfs_heketi_fstab | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed
 | openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
@@ -126,14 +128,16 @@ registry.
 These variables start with the prefix values in their corresponding
 non-registry variables. The following variables are an exception:

-| Name | Default value | Description |
-|-----------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
+| Name | Default value | Description |
+|-----------------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_registry_block_storageclass | False | It is recommended to not create a StorageClass for Gluster Block clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above

 Additionally, this role's behavior responds to several registry-specific
 variables in the [openshift_hosted role](../openshift_hosted/README.md):
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 4cbe262d2..7e751cc7a 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -14,6 +14,8 @@ openshift_storage_glusterfs_block_version: 'latest'
 openshift_storage_glusterfs_block_host_vol_create: True
 openshift_storage_glusterfs_block_host_vol_size: 100
 openshift_storage_glusterfs_block_host_vol_max: 15
+openshift_storage_glusterfs_block_storageclass: False
+openshift_storage_glusterfs_block_storageclass_default: False
 openshift_storage_glusterfs_s3_deploy: True
 openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
 openshift_storage_glusterfs_s3_version: 'latest'
@@ -61,6 +63,8 @@ openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_gluste
 openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
 openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
 openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+openshift_storage_glusterfs_registry_block_storageclass: False
+openshift_storage_glusterfs_registry_block_storageclass_default: False
 openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
 openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
 openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
@@ -103,3 +107,9 @@ r_openshift_storage_glusterfs_os_firewall_allow:
   port: "24008/tcp"
 - service: glusterfs_bricks
   port: "49152-49251/tcp"
+- service: glusterblockd
+  port: "24010/tcp"
+- service: iscsi-targets
+  port: "3260/tcp"
+- service: rpcbind
+  port: "111/tcp"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 001578406..a5fdae803 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -315,5 +315,31 @@
 - include_tasks: glusterblock_deploy.yml
   when: glusterfs_block_deploy

+- block:
+  - name: Create heketi block secret
+    oc_secret:
+      namespace: "{{ glusterfs_namespace }}"
+      state: present
+      name: "heketi-{{ glusterfs_name }}-admin-secret-block"
+      type: "gluster.org/glusterblock"
+      force: True
+      contents:
+      - path: key
+        data: "{{ glusterfs_heketi_admin_key }}"
+    when: glusterfs_heketi_admin_key is defined
+  - name: Generate Gluster Block StorageClass file
+    template:
+      src: "{{ openshift.common.examples_content_version }}/gluster-block-storageclass.yml.j2"
+      dest: "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+
+  - name: Create Gluster Block StorageClass
+    oc_obj:
+      state: present
+      kind: storageclass
+      name: "glusterfs-{{ glusterfs_name }}-block"
+      files:
+      - "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+  when: glusterfs_block_storageclass
+
 - include_tasks: gluster_s3_deploy.yml
   when: glusterfs_s3_deploy
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index a374df0ce..92de1b64d 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -17,6 +17,8 @@
     glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
     glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
     glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+    glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_block_storageclass | bool }}"
+    glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_block_storageclass_default | bool }}"
     glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
     glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
     glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 544a6f491..befacb04f 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -17,6 +17,8 @@
     glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}"
     glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}"
     glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}"
+    glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_registry_block_storageclass | bool }}"
+    glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_registry_block_storageclass_default | bool }}"
     glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
     glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
     glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
index dd4d6e6f7..bcc02e217 100644
--- a/roles/openshift_storage_glusterfs/templates/glusterfs.conf
+++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
@@ -1,4 +1,7 @@
 #{{ ansible_managed }}
 dm_thin_pool
 dm_snapshot
-dm_mirror
\ No newline at end of file
+dm_mirror
+#glusterblock
+dm_multipath
+target_core_user
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+  chapauthenabled: "true"
+  hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+  restsecretnamespace: "{{ glusterfs_namespace }}"
+  restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+  chapauthenabled: "true"
+  hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+  restsecretnamespace: "{{ glusterfs_namespace }}"
+  restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+  chapauthenabled: "true"
+  hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+  restsecretnamespace: "{{ glusterfs_namespace }}"
+  restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
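The three templates are identical apart from the versioned directory; the only branching is `resturl`, which uses the heketi route in native mode and an explicit `heketi_url:port` otherwise. A quick way to sanity-check that branch outside Ansible, using the standalone `jinja2` package (the hostnames and port below are made up for the example):

```python
from jinja2 import Template

# Just the resturl expression from gluster-block-storageclass.yml.j2
resturl = Template(
    "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}"
    "{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
)

# Native heketi: the route hostname is used as-is
print(resturl.render(glusterfs_heketi_is_native=True,
                     glusterfs_heketi_route='heketi-storage.apps.example.com'))
# -> http://heketi-storage.apps.example.com

# External heketi: FQDN/IP plus port
print(resturl.render(glusterfs_heketi_is_native=False,
                     glusterfs_heketi_url='10.0.0.5',
                     glusterfs_heketi_port=8080))
# -> http://10.0.0.5:8080
```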