29 files changed, 193 insertions(+), 135 deletions(-)
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 517023ba1..be2f8b5f4 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -93,7 +93,7 @@
     state: present
     with_items:
     - iproute
-    - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}"
+    - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
     - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
     - yum-utils

diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 39e82498d..a3446ef84 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -98,7 +98,11 @@
   serial: 1
   tasks:
   - include_role:
-      name: etcd_upgrade
+      name: etcd
+      tasks_from: upgrade_image
+    vars:
+      r_etcd_common_etcd_runtime: "host"
+      etcd_peer: "{{ openshift.common.hostname }}"
     when:
     - ansible_distribution == 'Fedora'
     - not openshift.common.is_containerized | bool

diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
index 831ca8f57..e5e895775 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
@@ -5,13 +5,14 @@
 - name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
-  roles:
-  - role: etcd_upgrade
-    r_etcd_upgrade_action: upgrade
-    r_etcd_upgrade_mechanism: image
-    r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
-    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-    etcd_peer: "{{ openshift.common.hostname }}"
+  tasks:
+  - include_role:
+      name: etcd
+      tasks_from: upgrade_image
+    vars:
+      r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+      r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+      etcd_peer: "{{ openshift.common.hostname }}"
     when:
     - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
    - openshift.common.is_containerized | bool

diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
index 2e79451e0..a2a26bad4 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
@@ -5,13 +5,14 @@
 - name: Upgrade to {{ etcd_upgrade_version }}
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
-  roles:
-  - role: etcd_upgrade
-    r_etcd_upgrade_action: upgrade
-    r_etcd_upgrade_mechanism: rpm
-    r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
-    r_etcd_common_etcd_runtime: "host"
-    etcd_peer: "{{ openshift.common.hostname }}"
+  tasks:
+  - include_role:
+      name: etcd
+      tasks_from: upgrade_rpm
+    vars:
+      r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+      r_etcd_common_etcd_runtime: "host"
+      etcd_peer: "{{ openshift.common.hostname }}"
     when:
     - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
     - ansible_distribution == 'RedHat'
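These upgrade plays only touch a host whose installed etcd is older than etcd_upgrade_version, using Ansible's version_compare filter (etcd_container_version for containerized hosts, etcd_rpm_version.stdout for RPM hosts). A minimal Python sketch of that guard; the naive dotted-integer comparison and the sample versions are illustrative assumptions, while the real filter understands richer version formats:

    # Sketch: decide whether a host needs the etcd upgrade (installed < target).
    def needs_upgrade(installed, target):
        to_tuple = lambda v: tuple(int(p) for p in v.split('.'))
        return to_tuple(installed) < to_tuple(target)

    assert needs_upgrade('3.1.0', '3.2.1')      # older host gets upgraded
    assert not needs_upgrade('3.2.1', '3.2.1')  # up-to-date host is skipped
    # An unset version fact defaults to '99' in the plays, which never counts as older.
    assert not needs_upgrade('99', '3.2.1')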
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index 81f3ee9e4..274fd8603 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -4,3 +4,17 @@ docker_cli_auth_config_path: '/root/.docker'
 # oreg_url is defined by user input.
 oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
 oreg_auth_credentials_replace: False
+
+openshift_docker_additional_registries: []
+openshift_docker_blocked_registries: []
+openshift_docker_insecure_registries: []
+
+# The l2_docker_* variables convert csv strings to lists, if
+# necessary. These variables should be used in place of their respective
+# openshift_docker_* counterparts to ensure the properly formatted lists are
+# utilized.
+l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}"
+l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
+l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
+
+containers_registries_conf_path: /etc/containers/registries.conf

diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 16aea5067..0c5621259 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -53,22 +53,22 @@
 - stat: path=/etc/sysconfig/docker
   register: docker_check

-- name: Set registry params
+- name: Comment old registry params in /etc/sysconfig/docker
   lineinfile:
     dest: /etc/sysconfig/docker
     regexp: '^{{ item.reg_conf_var }}=.*$'
-    line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
-  when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
+    line: "#{{ item.reg_conf_var }}=''# Moved to {{ containers_registries_conf_path }}"
   with_items:
   - reg_conf_var: ADD_REGISTRY
-    reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
-    reg_flag: --add-registry
   - reg_conf_var: BLOCK_REGISTRY
-    reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
-    reg_flag: --block-registry
   - reg_conf_var: INSECURE_REGISTRY
-    reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
-    reg_flag: --insecure-registry
+  notify:
+  - restart docker
+
+- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf
+  template:
+    dest: "{{ containers_registries_conf_path }}"
+    src: registries.conf
   notify:
   - restart docker
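The new l2_docker_* defaults normalize registry input that may arrive as an empty string, a single name, a comma-separated string, or an actual list, so downstream templates can always iterate. A Python sketch of the same normalization; the registry hostnames are made-up examples:

    def normalize_registries(value):
        # Mirrors the l2_docker_* Jinja expressions: strings become lists.
        if isinstance(value, str):
            if value == '':
                return []
            if ',' in value:
                return value.split(',')
            return [value]
        return value

    assert normalize_registries('') == []
    assert normalize_registries('reg1.example.com') == ['reg1.example.com']
    assert normalize_registries('reg1.example.com,reg2.example.com') == \
        ['reg1.example.com', 'reg2.example.com']
    assert normalize_registries(['reg1.example.com']) == ['reg1.example.com']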
diff --git a/roles/docker/templates/registries.conf b/roles/docker/templates/registries.conf
new file mode 100644
index 000000000..c55dbd84f
--- /dev/null
+++ b/roles/docker/templates/registries.conf
@@ -0,0 +1,46 @@
+# {{ ansible_managed }}
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to YAML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries', 'insecure_registries',
+# and 'block_registries'.
+
+
+#registries:
+#  - registry.access.redhat.com
+
+{% if l2_docker_additional_registries %}
+registries:
+{% for reg in l2_docker_additional_registries %}
+  - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to access insecure registries, uncomment the section below
+# and add the registry's fully-qualified name. An insecure registry is one
+# that does not have a valid SSL certificate or only supports HTTP.
+#insecure_registries:
+#  -
+
+{% if l2_docker_insecure_registries %}
+insecure_registries:
+{% for reg in l2_docker_insecure_registries %}
+  - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registry's fully-qualified name.
+#block_registries:
+#  -
+
+{% if l2_docker_blocked_registries %}
+block_registries:
+{% for reg in l2_docker_blocked_registries %}
+  - {{ reg }}
+{% endfor %}
+{% endif %}

diff --git a/roles/etcd_upgrade/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml
index 136ec1142..cea95a1b3 100644
--- a/roles/etcd_upgrade/tasks/upgrade_image.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_image.yml
@@ -29,8 +29,15 @@
 ## TODO: probably should just move this into the backup playbooks, also this
 ## will fail on atomic host. We need to revisit how to do etcd backups there as
 ## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
+- name: Detecting Atomic Host Operating System
+  stat:
+    path: /run/ostree-booted
+  register: l_ostree_booted
+
 - name: Upgrade etcd for etcdctl when not atomic
-  package: name=etcd state=latest
+  package:
+    name: etcd
+    state: latest
   when: not l_ostree_booted.stat.exists | bool

 - name: Verify cluster is healthy

diff --git a/roles/etcd_upgrade/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml
index 324b69605..324b69605 100644
--- a/roles/etcd_upgrade/tasks/upgrade_rpm.yml
+++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml

diff --git a/roles/etcd/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade_image.yml
new file mode 100644
index 000000000..9e69027eb
--- /dev/null
+++ b/roles/etcd/tasks/upgrade_image.yml
@@ -0,0 +1,2 @@
+---
+- include: upgrade/upgrade_image.yml

diff --git a/roles/etcd/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade_rpm.yml
new file mode 100644
index 000000000..29603d2b6
--- /dev/null
+++ b/roles/etcd/tasks/upgrade_rpm.yml
@@ -0,0 +1,2 @@
+---
+- include: upgrade/upgrade_rpm.yml

diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index b67411f40..6705e1ac5 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -73,3 +73,6 @@ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_clien
 etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"

 etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
+
+# etcd_peer needs to be set by a role caller
+etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
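The etcdctlv2 default assembles a complete etcdctl v2 invocation from the role's peer-certificate facts, so callers only have to provide etcd_peer. A sketch of how it expands; every path, hostname, and port below is a hypothetical stand-in for the real facts:

    # Sketch: expansion of the etcdctlv2 default (values are illustrative).
    facts = {
        'etcd_peer_cert_file': '/etc/etcd/peer.crt',
        'etcd_peer_key_file': '/etc/etcd/peer.key',
        'etcd_peer_ca_file': '/etc/etcd/ca.crt',
        'etcd_peer': 'master1.example.com',   # must be set by the role caller
        'etcd_client_port': 2379,
    }

    etcdctlv2 = (
        'etcdctl --cert-file {etcd_peer_cert_file} --key-file {etcd_peer_key_file} '
        '--ca-file {etcd_peer_ca_file} -C https://{etcd_peer}:{etcd_client_port}'
    ).format(**facts)

    print(etcdctlv2)
    # etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://master1.example.com:2379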
diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml
deleted file mode 100644
index 61bbba225..000000000
--- a/roles/etcd_upgrade/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-r_etcd_upgrade_action: upgrade
-r_etcd_upgrade_mechanism: rpm

diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml
deleted file mode 100644
index afdb0267f..000000000
--- a/roles/etcd_upgrade/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
-  author: Jan Chaloupka
-  description:
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 1.9
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies:
-- role: etcd_common
-  r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}"

diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml
deleted file mode 100644
index 129c69d6b..000000000
--- a/roles/etcd_upgrade/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# INPUT r_etcd_upgrade_action
-- name: Fail if invalid etcd_upgrade_action provided
-  fail:
-    msg: "etcd_upgrade role can only be called with 'upgrade'"
-  when:
-  - r_etcd_upgrade_action not in ['upgrade']
-
-- name: Detecting Atomic Host Operating System
-  stat:
-    path: /run/ostree-booted
-  register: l_ostree_booted
-
-- include: "{{ r_etcd_upgrade_action }}.yml"

diff --git a/roles/etcd_upgrade/tasks/upgrade.yml b/roles/etcd_upgrade/tasks/upgrade.yml
deleted file mode 100644
index 420c9638e..000000000
--- a/roles/etcd_upgrade/tasks/upgrade.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# INPUT r_etcd_upgrade_version
-# INPUT r_etcd_upgrade_mechanism
-- name: Failt if r_etcd_upgrade_mechanism is not set during upgrade
-  fail:
-    msg: "r_etcd_upgrade_mechanism can be only set to 'rpm' or 'image'"
-  when:
-  - r_etcd_upgrade_mechanism not in ['rpm', 'image']
-
-- name: "Upgrade {{ r_etcd_upgrade_mechanism }} based etcd"
-  include: upgrade_{{ r_etcd_upgrade_mechanism }}.yml

diff --git a/roles/etcd_upgrade/vars/main.yml b/roles/etcd_upgrade/vars/main.yml
deleted file mode 100644
index 5ed919d42..000000000
--- a/roles/etcd_upgrade/vars/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# EXPECTS etcd_peer
-etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"

diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
index 612d689c2..7be5d6743 100755
--- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
@@ -62,16 +62,14 @@ spec:
   selector:
     matchLabels:
       k8s-app: nuage-master-config
+  updateStrategy:
+    type: RollingUpdate
   template:
     metadata:
       labels:
         k8s-app: nuage-master-config
     spec:
       hostNetwork: true
-      tolerations:
-      - key: node-role.kubernetes.io/master
-        effect: NoSchedule
-        operator: Exists
       nodeSelector:
         install-monitor: "true"
       containers:
diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
index 02e9a1563..6a1267d94 100755
--- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
@@ -23,7 +23,7 @@ data:
     # IP address and port number of master API server
     masterApiServer: {{ api_server_url }}
     # REST server URL
-    nuageMonRestServer: {{ nuage_mon_rest_server_url }}
+    nuageMonRestServer: https://{{ openshift_master_cluster_hostname }}:{{ nuage_mon_rest_server_port }}
     # Bridge name for the docker bridge
     dockerBridgeName: docker0
     # Certificate for connecting to the openshift monitor REST api
@@ -32,11 +32,6 @@ data:
     nuageMonClientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.key
     # CA certificate for verifying the master's rest server
     nuageMonServerCA: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonCA.crt
-    # Nuage vport mtu size
-    interfaceMTU: {{ nuage_vport_mtu }}
-    # Logging level for the plugin
-    # allowed options are: "dbg", "info", "warn", "err", "emer", "off"
-    logLevel: 3

   # This will generate the required Nuage CNI yaml configuration
   cni_yaml_config: |
@@ -72,10 +67,6 @@ spec:
         k8s-app: nuage-cni-ds
     spec:
       hostNetwork: true
-      tolerations:
-      - key: node-role.kubernetes.io/master
-        effect: NoSchedule
-        operator: Exists
       containers:
         # This container installs Nuage CNI binaries
         # and CNI network config file on each node.
@@ -157,10 +148,6 @@ spec:
         k8s-app: nuage-vrs-ds
     spec:
       hostNetwork: true
-      tolerations:
-      - key: node-role.kubernetes.io/master
-        effect: NoSchedule
-        operator: Exists
       containers:
         # This container installs Nuage VRS running as a
         # container on each worker node

diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index d8bfca62a..fdf01b7c2 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -24,4 +24,4 @@ cni_bin_dir: "/opt/cni/bin/"
 nuage_plugin_crt_dir: /usr/share/vsp-openshift

 openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
-nuage_atomic_docker_additional_mounts: "DOCKER_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
+nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"

diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index c45f255af..f27eb629d 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -101,7 +101,7 @@ class LookupModule(LookupBase):
             {'name': 'MatchInterPodAffinity'}
         ])

-        if short_version in ['3.5', '3.6', '3.7']:
+        if short_version in ['3.5', '3.6']:
             predicates.extend([
                 {'name': 'NoVolumeZoneConflict'},
                 {'name': 'MaxEBSVolumeCount'},
@@ -114,6 +114,21 @@ class LookupModule(LookupBase):
                 {'name': 'CheckNodeDiskPressure'},
             ])

+        if short_version in ['3.7']:
+            predicates.extend([
+                {'name': 'NoVolumeZoneConflict'},
+                {'name': 'MaxEBSVolumeCount'},
+                {'name': 'MaxGCEPDVolumeCount'},
+                {'name': 'MaxAzureDiskVolumeCount'},
+                {'name': 'MatchInterPodAffinity'},
+                {'name': 'NoDiskConflict'},
+                {'name': 'GeneralPredicates'},
+                {'name': 'PodToleratesNodeTaints'},
+                {'name': 'CheckNodeMemoryPressure'},
+                {'name': 'CheckNodeDiskPressure'},
+                {'name': 'NoVolumeNodeConflict'},
+            ])
+
         if regions_enabled:
             region_predicate = {
                 'name': 'Region',
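Splitting 3.7 out of the shared 3.5/3.6 branch lets its predicate list evolve independently; the immediate difference is the addition of MaxAzureDiskVolumeCount and NoVolumeNodeConflict. An abbreviated Python sketch of the gate; only a few of the real predicate names are reproduced here, so treat the lists as illustrative:

    PREDICATES_3_5_3_6 = ['NoVolumeZoneConflict', 'MaxEBSVolumeCount',
                          'CheckNodeDiskPressure']
    PREDICATES_3_7 = ['NoVolumeZoneConflict', 'MaxEBSVolumeCount',
                      'MaxAzureDiskVolumeCount', 'CheckNodeDiskPressure',
                      'NoVolumeNodeConflict']

    def default_predicates(short_version):
        # Mirrors the lookup plugin's version gate.
        if short_version in ('3.5', '3.6'):
            return [{'name': n} for n in PREDICATES_3_5_3_6]
        if short_version == '3.7':
            return [{'name': n} for n in PREDICATES_3_7]
        return []

    assert {'name': 'NoVolumeNodeConflict'} in default_predicates('3.7')
    assert {'name': 'NoVolumeNodeConflict'} not in default_predicates('3.6')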
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 4a28fb8f8..38a918803 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -57,6 +57,20 @@ DEFAULT_PREDICATES_1_5 = [

 DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5

+DEFAULT_PREDICATES_3_7 = [
+    {'name': 'NoVolumeZoneConflict'},
+    {'name': 'MaxEBSVolumeCount'},
+    {'name': 'MaxGCEPDVolumeCount'},
+    {'name': 'MaxAzureDiskVolumeCount'},
+    {'name': 'MatchInterPodAffinity'},
+    {'name': 'NoDiskConflict'},
+    {'name': 'GeneralPredicates'},
+    {'name': 'PodToleratesNodeTaints'},
+    {'name': 'CheckNodeMemoryPressure'},
+    {'name': 'CheckNodeDiskPressure'},
+    {'name': 'NoVolumeNodeConflict'},
+]
+
 REGION_PREDICATE = {
     'name': 'Region',
     'argument': {
@@ -79,6 +93,8 @@ TEST_VARS = [
     ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
     ('3.6', 'origin', DEFAULT_PREDICATES_3_6),
     ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6),
+    ('3.7', 'origin', DEFAULT_PREDICATES_3_7),
+    ('3.7', 'openshift-enterprise', DEFAULT_PREDICATES_3_7),
 ]

diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 8734e7443..fa7238849 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
 {% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}

 [Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
 ExecStop=
 SyslogIdentifier={{ openshift.common.service_type }}-node-dep

diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 4ab10b95f..310d8b29d 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -34,6 +34,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
   -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
   -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
   -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+  {% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \
   -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
   {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
   {{ openshift.node.node_image }}:${IMAGE_VERSION}
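The node-dep unit writes an environment file at start-up so the containerized node service picks up extra bind mounts only when the host actually runs /usr/bin/docker-current; the change above adds /etc/containers/registries to that set. The ExecStart one-liner's logic, sketched in Python for readability ('origin' as the service type is an assumption):

    import os

    def write_node_dep_sysconfig(service_type='origin'):
        dep_file = '/etc/sysconfig/%s-node-dep' % service_type
        if os.path.isfile('/usr/bin/docker-current'):
            line = ('DOCKER_ADDTL_BIND_MOUNTS='
                    '--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro '
                    '--volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro '
                    '--volume=/etc/containers/registries:/etc/containers/registries:ro')
        else:
            line = '#DOCKER_ADDTL_BIND_MOUNTS='  # leave the variable commented out
        with open(dep_file, 'w') as f:
            f.write(line + '\n')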
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
index 4c47f8c0d..aae35719c 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service

 [Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
 ExecStop=
 SyslogIdentifier={{ openshift.common.service_type }}-node-dep

diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index 9ebb0d5ec..7b705c2d4 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -85,8 +85,6 @@ objects:
         volumeMounts:
         - name: db
           mountPath: /var/lib/heketi
-        - name: topology
-          mountPath: ${TOPOLOGY_PATH}
         - name: config
           mountPath: /etc/heketi
         readinessProbe:
@@ -103,9 +101,6 @@ objects:
             port: 8080
       volumes:
       - name: db
-      - name: topology
-        secret:
-          secretName: heketi-${CLUSTER_NAME}-topology-secret
       - name: config
         secret:
           secretName: heketi-${CLUSTER_NAME}-config-secret
@@ -138,6 +133,3 @@ parameters:
   displayName: GlusterFS cluster name
   description: A unique name to identify this heketi service, useful for running multiple heketi instances
   value: glusterfs
-- name: TOPOLOGY_PATH
-  displayName: heketi topology file location
-  required: True
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index bc0dde17d..3f6dab78b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -51,8 +51,8 @@
     kind: pod
     state: list
     selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
-  register: heketi_pod
-  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  register: deploy_heketi_pod
+  until: "deploy_heketi_pod.results.results[0]['items'] | count == 0"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"
   when: glusterfs_heketi_wipe
@@ -103,7 +103,7 @@
     state: list
     kind: pod
     selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
-  register: heketi_pod
+  register: deploy_heketi_pod
   when: glusterfs_heketi_is_native

 - name: Check if need to deploy deploy-heketi
@@ -111,9 +111,9 @@
     glusterfs_heketi_deploy_is_missing: False
   when:
   - "glusterfs_heketi_is_native"
-  - "heketi_pod.results.results[0]['items'] | count > 0"
+  - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
   # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+  - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"

 - name: Check for existing heketi pod
   oc_obj:
@@ -147,6 +147,21 @@
   when:
   - glusterfs_heketi_is_native

+- name: Get heketi admin secret
+  oc_secret:
+    state: list
+    namespace: "{{ glusterfs_namespace }}"
+    name: "heketi-{{ glusterfs_name }}-admin-secret"
+    decode: True
+  register: glusterfs_heketi_admin_secret
+
+- name: Set heketi admin key
+  set_fact:
+    glusterfs_heketi_admin_key: "{{ glusterfs_heketi_admin_secret.results.decoded.key }}"
+  when:
+  - glusterfs_is_native
+  - glusterfs_heketi_admin_secret.results.results[0]
+
 - name: Generate heketi admin key
   set_fact:
     glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
@@ -190,14 +205,37 @@
   - glusterfs_heketi_deploy_is_missing
   - glusterfs_heketi_is_missing

+- name: Wait for deploy-heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
+  register: deploy_heketi_pod
+  until:
+  - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+  when:
+  - glusterfs_heketi_is_native
+  - not glusterfs_heketi_deploy_is_missing
+  - glusterfs_heketi_is_missing
+
 - name: Set heketi-cli command
   set_fact:
-    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
+    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {% if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"

 - name: Verify heketi service
   command: "{{ glusterfs_heketi_client }} cluster list"
   changed_when: False

+- name: Place heketi topology on heketi Pod
+  shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {% if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
+  when:
+  - glusterfs_heketi_is_native
+
 - name: Load heketi topology
   command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
   register: topology_load
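With the topology secret gone, the new Place heketi topology task streams the local topology.json into the pod by redirecting stdin through oc exec instead of mounting it at deploy time. A Python sketch of that copy-into-pod technique; the pod, namespace, and paths are made up:

    import subprocess

    def copy_into_pod(pod, namespace, local_path, remote_path):
        # Equivalent of: oc exec -i <pod> -- bash -c 'cat > remote' < local
        cmd = ['oc', 'exec', '--namespace=%s' % namespace, '-i', pod,
               '--', 'bash', '-c', 'cat > %s' % remote_path]
        with open(local_path, 'rb') as src:
            subprocess.check_call(cmd, stdin=src)

    # copy_into_pod('deploy-heketi-storage-1-abcde', 'glusterfs',
    #               '/tmp/openshift-glusterfs/topology.json', '/tmp/topology.json')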
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index 3ba1eb2d2..73396c9af 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -6,16 +6,6 @@
   with_items:
   - "deploy-heketi-template.yml"

-- name: Create heketi topology secret
-  oc_secret:
-    namespace: "{{ glusterfs_namespace }}"
-    state: present
-    name: "heketi-{{ glusterfs_name }}-topology-secret"
-    force: True
-    files:
-    - name: topology.json
-      path: "{{ mktemp.stdout }}/topology.json"
-
 - name: Create deploy-heketi template
   oc_obj:
     namespace: "{{ glusterfs_namespace }}"
@@ -39,18 +29,7 @@
     HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
     HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
     CLUSTER_NAME: "{{ glusterfs_name }}"
-    TOPOLOGY_PATH: "{{ mktemp.stdout }}"

-- name: Wait for deploy-heketi pod
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
-  register: heketi_pod
-  until:
-  - "heketi_pod.results.results[0]['items'] | count > 0"
-  # Pod's 'Ready' status must be True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
-  delay: 10
-  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+- name: Set heketi Deployed fact
+  set_fact:
+    glusterfs_heketi_deploy_is_missing: False

diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index afc04a537..54a6dd7c3 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -4,7 +4,7 @@
   register: setup_storage

 - name: Copy heketi-storage list
-  shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+  shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"

 # This is used in the subsequent task
 - name: Copy the admin client config