Diffstat (limited to 'roles')
46 files changed, 711 insertions, 71 deletions
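The first hunk below (roles/docker/defaults/main.yml) introduces the `oreg_host` derivation that the master and node roles repeat: the hostname portion of `oreg_url` is kept only when it looks like a registry host (i.e. contains a dot), otherwise it stays empty. A minimal, standalone sketch of that expression — the playbook and the sample URL are illustrative, not part of the change:

```yaml
# Illustrative only: shows how the oreg_host expression added in these diffs behaves.
# The sample oreg_url value is a placeholder.
- hosts: localhost
  gather_facts: false
  vars:
    oreg_url: 'registry.example.com/openshift3/ose-${component}:${version}'
    oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
  tasks:
    - debug:
        msg: "oreg_host={{ oreg_host }}"  # -> registry.example.com; stays '' for e.g. 'openshift3/ose'
```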
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index ed97d539c..7e206ded1 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -1 +1,6 @@ --- +docker_cli_auth_config_path: '/root/.docker' + +oreg_url: '' +oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" +oreg_auth_credentials_replace: False diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index bc52ab60c..145b552a6 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -117,6 +117,18 @@ notify: - restart docker +- name: Check for credentials file for registry auth + stat: + path: "{{ docker_cli_auth_config_path }}/config.json" + when: oreg_auth_user is defined + register: docker_cli_auth_credentials_stat + +- name: Create credentials for docker cli registry auth + command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + - name: Start the Docker service systemd: name: docker diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 24ca0d9f8..0bab0899c 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -95,7 +95,7 @@ - name: Set to default prepend set_fact: l_crio_image_prepend: "docker.io/gscrivano" - l_crio_image_name: "crio-o-fedora" + l_crio_image_name: "cri-o-fedora" - name: Use Centos based image when distribution is CentOS set_fact: diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index ebfa6bb8f..517e0231d 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1602,11 +1602,13 @@ def set_builddefaults_facts(facts): builddefaults['git_no_proxy'] = builddefaults['no_proxy'] # If we're actually defining a builddefaults config then create admission_plugin_config # then merge builddefaults[config] structure into admission_plugin_config + + # 'config' is the 'openshift_builddefaults_json' inventory variable if 'config' in builddefaults: if 'admission_plugin_config' not in facts['master']: - facts['master']['admission_plugin_config'] = dict() + # Scaffold out the full expected datastructure + facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}} facts['master']['admission_plugin_config'].update(builddefaults['config']) - # if the user didn't actually provide proxy values, delete the proxy env variable defaults. 
delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env']) return facts diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index 08c1d849e..712a2a591 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -5,8 +5,8 @@ r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" -openshift_hosted_router_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}" -openshift_hosted_registry_wait: "{{ not openshift_master_bootstrap_enabled | default(True) }}" +openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" +openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" registry_volume_claim: 'registry-claim' diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index d73c290ff..48f53aef8 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -137,7 +137,7 @@ edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in openshift_hosted_registry_force }}" -- when: openshift_hosted_registry_wait +- when: openshift_hosted_registry_wait | bool block: - name: Ensure OpenShift registry correctly rolls out (best-effort today) command: | diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 68ec7233e..2a42b5a7c 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -94,7 +94,7 @@ stats_port: "{{ item.stats_port }}" with_items: "{{ openshift_hosted_routers }}" -- when: openshift_hosted_router_wait +- when: openshift_hosted_router_wait | bool block: - name: Ensure OpenShift router correctly rolls out (best-effort today) command: | diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml index 11478263c..72754df2e 100644 --- a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml index 0e3d006a7..298f8039e 100644 --- 
a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml index 28feac4e6..dace26793 100644 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml index 8bf98ba41..f821efd6b 100644 --- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml index bbaf76c17..019d836fe 100644 --- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml +++ 
b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_PREFIX}registry-console + name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects: - annotations: null from: kind: DockerImage - name: ${IMAGE_NAME} + name: ${IMAGE_NAME}:${IMAGE_VERSION} name: ${IMAGE_VERSION} - kind: OAuthClient apiVersion: v1 diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml index 7ccc2fc3b..f142f89f0 100644 --- a/roles/openshift_manageiq/vars/main.yml +++ b/roles/openshift_manageiq/vars/main.yml @@ -3,6 +3,9 @@ manage_iq_tasks: - resource_kind: role resource_name: admin user: management-admin +- resource_kind: role + resource_name: admin + user: system:serviceaccount:management-infra:management-admin - resource_kind: cluster-role resource_name: management-infra-admin user: system:serviceaccount:management-infra:management-admin diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 71bb09a76..4c8d6fdad 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -24,7 +24,7 @@ oreg_url: '' oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" oreg_auth_credentials_replace: False - +l_bind_docker_reg_auth: False # NOTE # r_openshift_master_*_default may be defined external to this role. 
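The master and node changes that follow move the registry-auth logic into dedicated registry_auth.yml task files: they stat the credentials file, run `docker login` when it is missing (or when `oreg_auth_credentials_replace` is set), and on containerized hosts flip `l_bind_docker_reg_auth` so the systemd unit templates later in this diff bind-mount the credentials read-only into the container. For reference, a hypothetical inventory/group_vars snippet showing the variables those tasks consume — the names come from the roles above, the values are placeholders:

```yaml
# Hypothetical group_vars; only the variable names are taken from the diffs.
oreg_url: 'registry.example.com/openshift3/ose-${component}:${version}'
oreg_auth_user: 'pull-bot'               # defining this enables the stat + docker login tasks
oreg_auth_password: 'not-a-real-secret'
oreg_auth_credentials_replace: True      # re-run `docker login` even if the credentials file already exists
```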
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 121261e94..894fe8e2b 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -229,21 +229,7 @@ - restart master controllers when: openshift_master_bootstrap_enabled | default(False) -- name: Check for credentials file for registry auth - stat: - path: "{{oreg_auth_credentials_path }}" - when: - - oreg_auth_user is defined - register: master_oreg_auth_credentials_stat - -- name: Create credentials for registry auth - command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" - when: - - oreg_auth_user is defined - - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - notify: - - restart master api - - restart master controllers +- include: registry_auth.yml - include: set_loopback_context.yml when: diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml new file mode 100644 index 000000000..96b6c614e --- /dev/null +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -0,0 +1,27 @@ +--- +- name: Check for credentials file for registry auth + stat: + path: "{{ oreg_auth_credentials_path }}" + when: oreg_auth_user is defined + register: master_oreg_auth_credentials_stat + +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True + when: + - openshift.common.is_containerized | bool + - oreg_auth_user is defined + - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + notify: + - restart master api + - restart master controllers + +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + notify: + - restart master api + - restart master controllers diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml new file mode 100644 index 000000000..1ab105808 --- /dev/null +++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml @@ -0,0 +1,8 @@ +--- +- yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: 'etcdClientInfo.urls' + value: "{{ openshift.master.etcd_urls }}" + notify: + - restart master api + - restart master controllers diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index f06448d71..a184a59f6 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -12,7 +12,17 @@ Requires={{ openshift.docker.service_name }}.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ 
r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host \ + --name {{ openshift.common.service_type }}-master-api \ + --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \ + -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \ + -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \ + -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \ + --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api LimitNOFILE=131072 diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index b7f36491b..2ded05f53 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -11,7 +11,17 @@ PartOf={{ openshift.docker.service_name }}.service EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers Environment=GOTRACEBACK=crash ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/docker run --rm --privileged --net=host \ + --name {{ openshift.common.service_type }}-master-controllers \ + --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \ + -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth %} -v {{ 
oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \ + --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers LimitNOFILE=131072 diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index f1e64f3aa..5424a64d2 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -84,7 +84,7 @@ oreg_url: '' oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False - +l_bind_docker_reg_auth: False # NOTE # r_openshift_node_*_default may be defined external to this role. diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 7af3f54b5..2759188f3 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -2,18 +2,6 @@ - name: Install the systemd units include: systemd_units.yml -- name: Check for tuned package - command: rpm -q tuned - args: - warn: no - register: tuned_installed - changed_when: false - failed_when: false - -- name: Set atomic-guest tuned profile - command: "tuned-adm profile atomic-guest" - when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool - - name: Start and enable openvswitch service systemd: name: openvswitch.service @@ -107,5 +95,9 @@ msg: Node failed to start please inspect the logs and try again when: node_start_result | failed +- name: Setup tuned + include: tuned.yml + static: yes + - set_fact: node_service_status_changed: "{{ node_start_result | changed }}" diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index 02b8ee67c..265bf2c46 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -1,11 +1,9 @@ --- -# We have to add tuned-profiles in the same transaction otherwise we run into depsolving -# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. 
- when: not openshift.common.is_containerized | bool block: - name: Install Node package package: - name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present - name: Install sdn-ovs package diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 22ff6dfd2..ff8d1942c 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -76,20 +76,7 @@ include: config.yml when: not openshift_node_bootstrap -- name: Check for credentials file for registry auth - stat: - path: "{{oreg_auth_credentials_path }}" - when: - - oreg_auth_user is defined - register: node_oreg_auth_credentials_stat - -- name: Create credentials for registry auth - command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" - when: - - oreg_auth_user is defined - - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - notify: - - restart node +- include: registry_auth.yml - name: Configure AWS Cloud Provider Settings lineinfile: diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml new file mode 100644 index 000000000..f370bb260 --- /dev/null +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -0,0 +1,25 @@ +--- +- name: Check for credentials file for registry auth + stat: + path: "{{ oreg_auth_credentials_path }}" + when: oreg_auth_user is defined + register: node_oreg_auth_credentials_stat + +# Container images may need the registry credentials +- name: Setup ro mount of /root/.docker for containerized hosts + set_fact: + l_bind_docker_reg_auth: True + when: + - openshift.common.is_containerized | bool + - oreg_auth_user is defined + - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + notify: + - restart node + +- name: Create credentials for registry auth + command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + when: + - oreg_auth_user is defined + - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + notify: + - restart node diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 57094f28e..4ab10b95f 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -21,7 +21,22 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type 
}}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ + --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \ + -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \ + -e HOST=/rootfs -e HOST_ETC=/host-etc \ + -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \ + -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \ + {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ + -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \ + -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \ + -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \ + -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \ + -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \ + -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \ + -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \ + {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ + {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js index 1f25cc39f..16a307c06 100644 --- a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js +++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js @@ -1,2 +1 @@ window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true; -window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true; diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index a059745a6..d0bc0e028 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -76,10 +76,11 @@ GlusterFS cluster into a new or existing OpenShift cluster: | Name | Default value | Description | 
|--------------------------------------------------|-------------------------|-----------------------------------------| | openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready -| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources +| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources | openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized | openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names | openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name +| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. | openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster | openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' | openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods @@ -91,7 +92,7 @@ GlusterFS cluster into a new or existing OpenShift cluster: | openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin | openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes | openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi -| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service. +| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service. 
| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode | openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes | openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index 0b3d3aef1..148549887 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -3,6 +3,7 @@ openshift_storage_glusterfs_timeout: 300 openshift_storage_glusterfs_is_native: True openshift_storage_glusterfs_name: 'storage' openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host" +openshift_storage_glusterfs_use_default_selector: False openshift_storage_glusterfs_storageclass: True openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' @@ -31,6 +32,7 @@ openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.na openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}" openshift_storage_glusterfs_registry_name: 'registry' openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host" +openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" openshift_storage_glusterfs_registry_storageclass: False openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}" openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" @@ -58,9 +60,9 @@ r_openshift_storage_glusterfs_os_firewall_deny: [] r_openshift_storage_glusterfs_os_firewall_allow: - service: glusterfs_sshd port: "2222/tcp" -- service: glusterfs_daemon - port: "24007/tcp" - service: glusterfs_management + port: "24007/tcp" +- service: glusterfs_rdma port: "24008/tcp" - service: glusterfs_bricks port: "49152-49251/tcp" diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml new file mode 100644 index 000000000..9ebb0d5ec --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml @@ -0,0 +1,143 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: deploy-heketi + labels: + glusterfs: heketi-template + deploy-heketi: support + annotations: + description: Bootstrap Heketi installation + tags: glusterfs,heketi,installation +objects: +- kind: Service + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-service + deploy-heketi: support + annotations: + description: Exposes Heketi service + spec: + ports: + - name: deploy-heketi-${CLUSTER_NAME} + port: 8080 + targetPort: 8080 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-route + deploy-heketi: support + spec: + to: + kind: Service + name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: 
deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-dc + deploy-heketi: support + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + deploy-heketi: support + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: topology + mountPath: ${TOPOLOGY_PATH} + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + - name: topology + secret: + secretName: heketi-${CLUSTER_NAME}-topology-secret + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs +- name: TOPOLOGY_PATH + displayName: heketi topology file location + required: True diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml new file mode 100644 index 000000000..8c5e1ded3 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: glusterfs + labels: + glusterfs: template + annotations: + description: GlusterFS DaemonSet template + tags: glusterfs +objects: +- kind: DaemonSet + apiVersion: extensions/v1beta1 + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-daemonset + annotations: + description: GlusterFS DaemonSet + tags: glusterfs + spec: + selector: + matchLabels: + glusterfs: ${CLUSTER_NAME}-pod + template: + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-pod 
+ glusterfs-node: pod + spec: + nodeSelector: "${{NODE_LABELS}}" + hostNetwork: true + containers: + - name: glusterfs + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: glusterfs-heketi + mountPath: "/var/lib/heketi" + - name: glusterfs-run + mountPath: "/run" + - name: glusterfs-lvm + mountPath: "/run/lvm" + - name: glusterfs-etc + mountPath: "/etc/glusterfs" + - name: glusterfs-logs + mountPath: "/var/log/glusterfs" + - name: glusterfs-config + mountPath: "/var/lib/glusterd" + - name: glusterfs-dev + mountPath: "/dev" + - name: glusterfs-misc + mountPath: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + mountPath: "/sys/fs/cgroup" + readOnly: true + - name: glusterfs-ssl + mountPath: "/etc/ssl" + readOnly: true + securityContext: + capabilities: {} + privileged: true + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + resources: {} + terminationMessagePath: "/dev/termination-log" + volumes: + - name: glusterfs-heketi + hostPath: + path: "/var/lib/heketi" + - name: glusterfs-run + emptyDir: {} + - name: glusterfs-lvm + hostPath: + path: "/run/lvm" + - name: glusterfs-etc + hostPath: + path: "/etc/glusterfs" + - name: glusterfs-logs + hostPath: + path: "/var/log/glusterfs" + - name: glusterfs-config + hostPath: + path: "/var/lib/glusterd" + - name: glusterfs-dev + hostPath: + path: "/dev" + - name: glusterfs-misc + hostPath: + path: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: glusterfs-ssl + hostPath: + path: "/etc/ssl" + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} +parameters: +- name: NODE_LABELS + displayName: Daemonset Node Labels + description: Labels which define the daemonset node selector. 
Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\' + value: '{ "glusterfs": "storage-host" }' +- name: IMAGE_NAME + displayName: GlusterFS container image name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml new file mode 100644 index 000000000..61b6a8c13 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml @@ -0,0 +1,134 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: heketi-${CLUSTER_NAME}-route + spec: + to: + kind: Service + name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-pod + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-db-${CLUSTER_NAME}-endpoints + path: heketidbstorage + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods 
reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index a31c5bd5e..bc0dde17d 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -15,6 +15,7 @@ oc_project: state: present name: "{{ glusterfs_namespace }}" + node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}" when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass - name: Delete pre-existing heketi resources diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index 7a2987883..012c722ff 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -5,6 +5,7 @@ glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}" + glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_version }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 17f87578d..1bcab8e49 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -5,6 +5,7 @@ glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}" + glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..11c9195bb --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | 
default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 new file mode 100644 index 000000000..3f869d2b7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: glusterfs-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 new file mode 100644 index 000000000..095fb780f --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }} +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" +{% if glusterfs_heketi_admin_key is defined %} + secretNamespace: "{{ glusterfs_namespace }}" + secretName: "heketi-{{ glusterfs_name }}-admin-secret" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 new file mode 100644 index 000000000..99cbdf748 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 new file mode 100644 index 000000000..dcb896441 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 new file mode 100644 index 000000000..579b11bb7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 @@ -0,0 +1,36 @@ +{ + "_port_comment": "Heketi Server Port Number", + "port" : "8080", + + "_use_auth": "Enable JWT authorization. Please enable for deployment", + "use_auth" : false, + + "_jwt" : "Private keys for access", + "jwt" : { + "_admin" : "Admin has access to all APIs", + "admin" : { + "key" : "My Secret" + }, + "_user" : "User only has access to /volumes endpoint", + "user" : { + "key" : "My Secret" + } + }, + + "_glusterfs_comment": "GlusterFS Configuration", + "glusterfs" : { + + "_executor_comment": "Execute plugin. 
Possible choices: mock, kubernetes, ssh", + "executor" : "{{ glusterfs_heketi_executor }}", + + "_db_comment": "Database file name", + "db" : "/var/lib/heketi/heketi.db", + + "sshexec" : { + "keyfile" : "/etc/heketi/private_key", + "port" : "{{ glusterfs_heketi_ssh_port }}", + "user" : "{{ glusterfs_heketi_ssh_user }}", + "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }} + } + } +} diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 new file mode 100644 index 000000000..d6c28f6dd --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 @@ -0,0 +1,49 @@ +{ + "clusters": [ +{%- set clusters = {} -%} +{%- for node in glusterfs_nodes -%} + {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%} + {%- if cluster in clusters -%} + {%- set _dummy = clusters[cluster].append(node) -%} + {%- else -%} + {%- set _dummy = clusters.update({cluster: [ node, ]}) -%} + {%- endif -%} +{%- endfor -%} +{%- for cluster in clusters -%} + { + "nodes": [ +{%- for node in clusters[cluster] -%} + { + "node": { + "hostnames": { + "manage": [ +{%- if 'glusterfs_hostname' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_hostname }}" +{%- elif 'openshift' in hostvars[node] -%} + "{{ hostvars[node].openshift.node.nodename }}" +{%- else -%} + "{{ node }}" +{%- endif -%} + ], + "storage": [ +{%- if 'glusterfs_ip' in hostvars[node] -%} + "{{ hostvars[node].glusterfs_ip }}" +{%- else -%} + "{{ hostvars[node].openshift.common.ip }}" +{%- endif -%} + ] + }, + "zone": {{ hostvars[node].glusterfs_zone | default(1) }} + }, + "devices": [ +{%- for device in hostvars[node].glusterfs_devices -%} + "{{ device }}"{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] + }{% if not loop.last %},{% endif %} +{%- endfor -%} + ] +} |
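The topology.json.j2 template above groups `glusterfs_nodes` into clusters and, for each node, emits the manage/storage hostnames, a zone, and the device list, falling back to `openshift.node.nodename` and `openshift.common.ip` when the GlusterFS-specific variables are unset. A hypothetical pair of host_vars files that would feed it — the variable names are the ones read via `hostvars[node]` in the template, the values are placeholders:

```yaml
# host_vars/node1.example.com.yml (hypothetical)
glusterfs_ip: 192.0.2.11        # becomes the "storage" address
glusterfs_zone: 1
glusterfs_devices:
  - /dev/sdb
  - /dev/sdc
```

```yaml
# host_vars/node2.example.com.yml (hypothetical)
glusterfs_hostname: node2-gluster.example.com   # overrides the "manage" hostname
glusterfs_ip: 192.0.2.12
glusterfs_zone: 2
glusterfs_devices:
  - /dev/sdb
```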