Diffstat (limited to 'playbooks/common')
90 files changed, 1417 insertions, 534 deletions
diff --git a/playbooks/common/README.md b/playbooks/common/README.md
index 0b5e26989..968bd99cb 100644
--- a/playbooks/common/README.md
+++ b/playbooks/common/README.md
@@ -1,9 +1,8 @@
# Common playbooks

This directory has a generic set of playbooks that are included by playbooks in
-[`byo`](../byo), as well as other playbooks related to the
-[`bin/cluster`](../../bin) tool.
+[`byo`](../byo).

Note: playbooks in this directory use generic group names that do not line up
-with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks,
-requiring an explicit remapping of groups.
+with the groups used by the `byo` playbooks, requiring an explicit remapping of
+groups.
diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml
new file mode 100644
index 000000000..533a35d9e
--- /dev/null
+++ b/playbooks/common/openshift-cfme/config.yml
@@ -0,0 +1,44 @@
+---
+# TODO: Make this work. The 'name' variable below is undefined
+# presently because it's part of the cfme role. This play can't run
+# until that's re-worked.
+#
+# - name: Pre-Pull manageiq-pods docker images
+#   hosts: nodes
+#   tasks:
+#   - name: Ensure the latest manageiq-pods docker image is pulling
+#     docker_image:
+#       name: "{{ openshift_cfme_container_image }}"
+#     # Fire-and-forget method, never timeout
+#     async: 99999999999
+#     # F-a-f, never check on this. True 'background' task.
+#     poll: 0
+
+- name: Configure Masters for CFME Bulk Image Imports
+  hosts: oo_masters_to_config
+  serial: 1
+  tasks:
+  - name: Run master cfme tuning playbook
+    include_role:
+      name: openshift_cfme
+      tasks_from: tune_masters
+
+- name: Setup CFME
+  hosts: oo_first_master
+  vars:
+    r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}"
+  pre_tasks:
+  - name: Create a temporary place to evaluate the PV templates
+    command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: r_openshift_cfme_mktemp
+    changed_when: false
+  - name: Ensure the server template was read from disk
+    debug:
+      msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}"
+
+  tasks:
+  - name: Run the CFME Setup Role
+    include_role:
+      name: openshift_cfme
+    vars:
+      template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}"
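The commented-out pre-pull play above describes Ansible's fire-and-forget pattern (`async` with `poll: 0`). A minimal working sketch of that pattern, with a hypothetical image name standing in for the undefined role variable:

    - name: Pre-Pull manageiq-pods docker images
      hosts: nodes
      tasks:
      - name: Start the image pull in the background
        docker_image:
          name: "docker.io/example/manageiq-pods:latest"  # hypothetical image reference
        # async + poll: 0 starts the task and never polls it again
        async: 99999999999
        poll: 0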
diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-cfme/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library
new file mode 120000
index 000000000..ba40d2f56
--- /dev/null
+++ b/playbooks/common/openshift-cfme/library
@@ -0,0 +1 @@
+../../../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-cfme/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-cfme/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
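Both new CFME entry points rely on `include_role` with `tasks_from`, which runs a single task file out of a role rather than applying the whole role. A minimal sketch of the idiom (role and task-file names here are illustrative, not from this repository):

    - hosts: localhost
      gather_facts: no
      tasks:
      - name: Run one task file out of a role
        include_role:
          name: example_role      # hypothetical role
          tasks_from: uninstall   # runs roles/example_role/tasks/uninstall.yml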
diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml
new file mode 100644
index 000000000..78b8e7668
--- /dev/null
+++ b/playbooks/common/openshift-cfme/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall CFME
+  hosts: masters
+  tasks:
+  - name: Run the CFME Uninstall Role Tasks
+    include_role:
+      name: openshift_cfme
+      tasks_from: uninstall
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..dfcef8435
--- /dev/null
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -0,0 +1,12 @@
+---
+- name: OpenShift health checks
+  hosts: oo_all_hosts
+  roles:
+  - openshift_health_checker
+  vars:
+  - r_openshift_health_checker_playbook_context: adhoc
+  post_tasks:
+  - name: Run health checks
+    action: openshift_health_check
+    args:
+      checks: '{{ openshift_checks | default([]) }}'
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 1bee460e8..21ea785ef 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,11 +1,11 @@
---
- name: Run OpenShift health checks
-  hosts: OSEv3
+  hosts: oo_all_hosts
  roles:
  - openshift_health_checker
  vars:
-  - r_openshift_health_checker_playbook_context: "health"
+  - r_openshift_health_checker_playbook_context: health
  post_tasks:
-  - action: openshift_health_check  # https://github.com/ansible/ansible/issues/20513
+  - action: openshift_health_check
    args:
      checks: ['@health']
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index e01c6f38d..88e6f9120 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,11 +1,11 @@
---
-- hosts: OSEv3
-  name: run OpenShift pre-install checks
+- name: run OpenShift pre-install checks
+  hosts: oo_all_hosts
  roles:
  - openshift_health_checker
  vars:
-  - r_openshift_health_checker_playbook_context: "pre-install"
+  - r_openshift_health_checker_playbook_context: pre-install
  post_tasks:
-  - action: openshift_health_check  # https://github.com/ansible/ansible/issues/20513
+  - action: openshift_health_check
    args:
      checks: ['@preflight']
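The new adhoc entry point takes its check list from the `openshift_checks` variable, so individual checks can be selected per run. A hedged usage sketch, assuming a byo wrapper playbook exists at the usual mirrored path:

    # ansible-playbook -i hosts playbooks/byo/openshift-checks/adhoc.yml \
    #   -e openshift_checks=disk_availability,memory_availability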
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 1482b3a3f..bbd5a0185 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,21 +1,37 @@
---
+# TODO: refactor this into its own include
+# and pass a variable for ctx
+- name: Verify Requirements
+  hosts: oo_all_hosts
+  roles:
+  - openshift_health_checker
+  vars:
+  - r_openshift_health_checker_playbook_context: install
+  post_tasks:
+  - action: openshift_health_check
+    args:
+      checks:
+      - disk_availability
+      - memory_availability
+      - package_availability
+      - package_version
+      - docker_image_availability
+      - docker_storage
+
- include: initialize_oo_option_facts.yml
  tags:
  - always

-- name: Disable excluders
+- name: Set hostname
  hosts: oo_masters_to_config:oo_nodes_to_config
-  tags:
-  - always
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: disable
-    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+  tasks:
+  # TODO: switch back to hostname module once we depend on ansible-2.4
+  # https://github.com/ansible/ansible/pull/25906
+  - name: Set hostname
+    command: "hostnamectl set-hostname {{ openshift.common.hostname }}"
+    when: openshift_set_hostname | default(false,true) | bool

- include: ../openshift-etcd/config.yml
-  tags:
-  - etcd

- include: ../openshift-nfs/config.yml
  tags:
@@ -26,12 +42,8 @@
  - loadbalancer

- include: ../openshift-master/config.yml
-  tags:
-  - master

-- include: additional_config.yml
-  tags:
-  - master
+- include: ../openshift-master/additional_config.yml

- include: ../openshift-node/config.yml
  tags:
@@ -45,12 +57,8 @@
  tags:
  - hosted

-- name: Re-enable excluder if it was previously enabled
-  hosts: oo_masters_to_config:oo_nodes_to_config
+- include: service_catalog.yml
+  when:
+  - openshift_enable_service_catalog | default(false) | bool
  tags:
-  - always
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: enable
-    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+  - servicecatalog
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 5425f448f..be14b06f0 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -27,9 +27,6 @@
      role: "{{ item.role }}"
      local_facts: "{{ item.local_facts }}"
    with_items:
-    - role: common
-      local_facts:
-        use_dnsmasq: True
    - role: master
      local_facts:
        dns_port: '8053'
@@ -37,7 +34,7 @@
      dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
      yaml_key: dnsConfig.bindAddress
      yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
-    notify: restart master
+    notify: restart master api

  - meta: flush_handlers

- name: Configure nodes for dnsmasq
@@ -50,9 +47,6 @@
      role: "{{ item.role }}"
      local_facts: "{{ item.local_facts }}"
    with_items:
-    - role: common
-      local_facts:
-        use_dnsmasq: True
    - role: node
      local_facts:
        dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c28ce4c14..e55b2f964 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,20 +5,20 @@
  become: no
  gather_facts: no
  tasks:
-  - name: Evaluate groups - g_etcd_hosts required
+  - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
    fail:
-      msg: This playbook requires g_etcd_hosts to be set
-    when: g_etcd_hosts is not defined
+      msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
+    when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined

  - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
    fail:
      msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
-    when: g_master_hosts is not defined or g_new_master_hosts is not defined
+    when: g_master_hosts is not defined and g_new_master_hosts is not defined

  - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
    fail:
      msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
-    when: g_node_hosts is not defined or g_new_node_hosts is not defined
+    when: g_node_hosts is not defined and g_new_node_hosts is not defined

  - name: Evaluate groups - g_lb_hosts required
    fail:
@@ -33,13 +33,26 @@
  - name: Evaluate groups - g_nfs_hosts is single host
    fail:
      msg: The nfs group must be limited to one host
-    when: (groups[g_nfs_hosts] | default([])) | length > 1
+    when: g_nfs_hosts | default([]) | length > 1

  - name: Evaluate groups - g_glusterfs_hosts required
    fail:
      msg: This playbook requires g_glusterfs_hosts to be set
    when: g_glusterfs_hosts is not defined

+  - name: Evaluate groups - Fail if no etcd hosts group is defined
+    fail:
+      msg: >
+        Running etcd as an embedded service is no longer supported. If this is a
+        new install please define an 'etcd' group with either one or three
+        hosts. These hosts may be the same hosts as your masters. If this is an
+        upgrade you may set openshift_master_unsupported_embedded_etcd=true
+        until a migration playbook becomes available.
+    when:
+    - g_etcd_hosts | default([]) | length not in [3,1]
+    - not openshift_master_unsupported_embedded_etcd | default(False)
+    - not openshift_node_bootstrap | default(False)
+
  - name: Evaluate oo_all_hosts
    add_host:
      name: "{{ item }}"
@@ -67,6 +80,15 @@
    when: g_master_hosts|length > 0
    changed_when: no

+  - name: Evaluate oo_new_etcd_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_new_etcd_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_new_etcd_hosts | default([]) }}"
+    changed_when: no
+
  - name: Evaluate oo_masters_to_config
    add_host:
      name: "{{ item }}"
@@ -108,7 +130,7 @@
    add_host:
      name: "{{ item }}"
      groups: oo_etcd_hosts_to_backup
-    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
    changed_when: False

  - name: Evaluate oo_nodes_to_config
@@ -157,3 +179,12 @@
      ansible_become: "{{ g_sudo | default(omit) }}"
    with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
    changed_when: no
+
+  - name: Evaluate oo_etcd_to_migrate
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_to_migrate
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
+    changed_when: no
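The new guard above rejects inventories without an etcd group of exactly one or three hosts. A hedged sketch of a matching YAML inventory fragment (hostnames illustrative; the masters may double as etcd hosts):

    all:
      children:
        etcd:
          hosts:
            master1.example.com:
            master2.example.com:
            master3.example.com: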
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 9cebecd68..0723575c2 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -6,12 +6,151 @@

- name: Initialize host facts
  hosts: oo_all_hosts
-  roles:
-  - openshift_facts
  tasks:
-  - openshift_facts:
+  - name: load openshift_facts module
+    include_role:
+      name: openshift_facts
+
+  # TODO: Should this role be refactored into health_checks??
+  - name: Run openshift_sanitize_inventory to set variables
+    include_role:
+      name: openshift_sanitize_inventory
+
+  - name: Detecting Operating System from ostree_booted
+    stat:
+      path: /run/ostree-booted
+    register: ostree_booted
+
+  # Locally setup containerized facts for now
+  - name: initialize_facts set fact l_is_atomic
+    set_fact:
+      l_is_atomic: "{{ ostree_booted.stat.exists }}"
+
+  - name: initialize_facts set fact for containerized and l_is_*_system_container
+    set_fact:
+      l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+      l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+      l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+      l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+      l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
+  - name: initialize_facts set facts for l_any_system_container
+    set_fact:
+      l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
+
+  - name: initialize_facts set fact for l_etcd_runtime
+    set_fact:
+      l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+
+  # TODO: Should this be moved into health checks??
+  # Seems as though any check that happens with a corresponding fail should move into health_checks
+  - name: Validate python version - ans_dist is fedora and python is v3
+    fail:
+      msg: |
+        openshift-ansible requires Python 3 for {{ ansible_distribution }};
+        For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+    when:
+    - ansible_distribution == 'Fedora'
+    - ansible_python['version']['major'] != 3
+
+  # TODO: Should this be moved into health checks??
+  # Seems as though any check that happens with a corresponding fail should move into health_checks
+  - name: Validate python version - ans_dist not Fedora and python must be v2
+    fail:
+      msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
+    when:
+    - ansible_distribution != 'Fedora'
+    - ansible_python['version']['major'] != 2
+
+  # TODO: Should this be moved into health checks??
+  # Seems as though any check that happens with a corresponding fail should move into health_checks
+  # Fail as early as possible if Atomic and old version of Docker
+  - when:
+    - l_is_atomic | bool
+    block:
+
+    # See https://access.redhat.com/articles/2317361
+    # and https://github.com/ansible/ansible/issues/15892
+    # NOTE: the "'s can not be removed at this level else the docker command will fail
+    # NOTE: When ansible >2.2.1.x is used this can be updated per
+    # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
+    - name: Determine Atomic Host Docker Version
+      shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+      register: l_atomic_docker_version
+
+    - name: assert atomic host docker version is 1.12 or later
+      assert:
+        that:
+        - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+        msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
+
+  - when:
+    - not l_is_atomic | bool
+    block:
+    - name: Ensure openshift-ansible installer package deps are installed
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items:
+      - iproute
+      - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}"
+      - PyYAML
+      - yum-utils
+
+    - name: Ensure various deps for running system containers are installed
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items:
+      - atomic
+      - ostree
+      - runc
+      when:
+      - l_any_system_container | bool
+
+  - name: Default system_images_registry to a enterprise registry
+    set_fact:
+      system_images_registry: "registry.access.redhat.com"
+    when:
+    - system_images_registry is not defined
+    - openshift_deployment_type == "openshift-enterprise"
+
+  - name: Default system_images_registry to community registry
+    set_fact:
+      system_images_registry: "docker.io"
+    when:
+    - system_images_registry is not defined
+    - openshift_deployment_type == "origin"
+
+  - name: Gather Cluster facts and set is_containerized if needed
+    openshift_facts:
      role: common
      local_facts:
+        deployment_type: "{{ openshift_deployment_type }}"
+        deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+        cli_image: "{{ osm_image | default(None) }}"
        hostname: "{{ openshift_hostname | default(None) }}"
-  - set_fact:
-      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+        ip: "{{ openshift_ip | default(None) }}"
+        is_containerized: "{{ l_is_containerized | default(None) }}"
+        is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+        is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+        is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+        is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+        etcd_runtime: "{{ l_etcd_runtime }}"
+        system_images_registry: "{{ system_images_registry }}"
+        public_hostname: "{{ openshift_public_hostname | default(None) }}"
+        public_ip: "{{ openshift_public_ip | default(None) }}"
+        portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+        http_proxy: "{{ openshift_http_proxy | default(None) }}"
+        https_proxy: "{{ openshift_https_proxy | default(None) }}"
+        no_proxy: "{{ openshift_no_proxy | default(None) }}"
+        generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+        no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+
+  - name: initialize_facts set_fact repoquery command
+    set_fact:
+      repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+  - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
+    set_fact:
+      openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
new file mode 100644
index 000000000..a7114fc80
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup yum repositories for all hosts
+  hosts: oo_all_hosts
+  gather_facts: no
+  tasks:
+  - name: initialize openshift repos
+    include_role:
+      name: openshift_repos
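The `l_etcd_runtime` fact above chains Jinja2 inline conditionals, which are evaluated left to right. The same pattern in isolation, with illustrative variable names:

    - set_fact:
        # 'runc' for system containers, 'docker' for other containerized
        # installs, 'host' otherwise
        runtime: "{{ 'runc' if use_system_container | default(false) | bool else 'docker' if containerized | default(false) | bool else 'host' }}"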
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index f4e52869e..7af6b25bc 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,24 +1,5 @@
---
# NOTE: requires openshift_facts be run
-- name: Verify compatible yum/subscription-manager combination
-  hosts: oo_all_hosts
-  gather_facts: no
-  tasks:
-  # See:
-  # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
-  # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
-  # https://github.com/openshift/openshift-ansible/issues/1138
-  # Consider the repoquery module for this work
-  - name: Check for bad combinations of yum and subscription-manager
-    command: >
-      {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
-    register: yum_ver_test
-    changed_when: false
-    when: not openshift.common.is_atomic | bool
-  - fail:
-      msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
-    when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
-
- name: Determine openshift_version to configure on first master
  hosts: oo_first_master
  roles:
@@ -31,5 +12,10 @@
  hosts: oo_all_hosts:!oo_first_master
  vars:
    openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+  pre_tasks:
+  - set_fact:
+      openshift_pkg_version: -{{ openshift_version }}
+    when: openshift_pkg_version is not defined
+  - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
  roles:
  - openshift_version
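The new `pre_tasks` block pins `openshift_pkg_version` with a leading dash because the value is appended directly to a package name at install time. A sketch of how such a pin is typically consumed (the task itself is illustrative):

    - name: Install a version-pinned package
      package:
        # e.g. "origin" + "-3.6.0" -> origin-3.6.0
        name: "origin{{ openshift_pkg_version | default('') }}"
        state: present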
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 8d94b6509..75339f6df 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -26,6 +26,8 @@
    logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
    logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
  roles:
+  - role: openshift_default_storage_class
+    when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
  - role: openshift_hosted
  - role: openshift_metrics
    when: openshift_hosted_metrics_deploy | default(false) | bool
@@ -45,8 +47,9 @@
  - role: cockpit-ui
    when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
-  - role: openshift_default_storage_class
-    when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
+
+  - role: openshift_prometheus
+    when: openshift_hosted_prometheus_deploy | default(false) | bool

- name: Update master-config for publicLoggingURL
  hosts: oo_masters_to_config:!oo_first_master
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 57580406c..c1a5d83cd 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,6 +1,4 @@
---
-- include: evaluate_groups.yml
-
- name: OpenShift Aggregated Logging
  hosts: oo_first_master
  roles:
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index bcff4a1a1..1dc180c26 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,7 +1,14 @@
---
-- include: evaluate_groups.yml
-
- name: OpenShift Metrics
  hosts: oo_first_master
  roles:
  - openshift_metrics
+
+- name: OpenShift Metrics
+  hosts: oo_masters:!oo_first_master
+  serial: 1
+  tasks:
+  - name: Setup the non-first masters configs
+    include_role:
+      name: openshift_metrics
+      tasks_from: update_master_config.yaml
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
new file mode 100644
index 000000000..a979c0c00
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -0,0 +1,9 @@
+---
+- include: std_include.yml
+
+- name: OpenShift Prometheus
+  hosts: oo_first_master
+  roles:
+  - openshift_prometheus
+  vars:
+    openshift_prometheus_state: present
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
index c30889d64..51b196299 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
@@ -51,3 +51,13 @@
      | oo_collect('openshift.common.hostname')
      | default(none, true) }}"
    openshift_certificates_redeploy: true
+  - role: lib_utils
+  post_tasks:
+  - yedit:
+      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+      key: servingInfo.namedCertificates
+      value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}"
+    when:
+    - ('named_certificates' in openshift.master)
+    - openshift.master.named_certificates | default([]) | length > 0
+    - openshift_master_overwrite_named_certificates | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index 8c8062585..afd5463b2 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -66,6 +66,7 @@
      --signer-cert={{ openshift.common.config_base }}/master/ca.crt
      --signer-key={{ openshift.common.config_base }}/master/ca.key
      --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
+      --config={{ mktemp.stdout }}/admin.kubeconfig
      --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
      --cert={{ openshift.common.config_base }}/master/registry.crt
      --key={{ openshift.common.config_base }}/master/registry.key
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index 9f14f2d69..748bbbf91 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -116,8 +116,9 @@
      tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
      tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
      --type=kubernetes.io/tls
+      --config={{ mktemp.stdout }}/admin.kubeconfig
      --confirm
-      -o json | {{ openshift.common.client_binary }} replace -f -
+      -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f -

  - name: Remove temporary router certificate and key files
    file:
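A recurring fix in these certificate plays is passing `--config` explicitly so `oc` talks to the cluster with the intended admin kubeconfig rather than whatever the environment supplies. The bare idiom, reusing the same default paths as above:

    - name: Run oc against an explicit kubeconfig
      command: >
        {{ openshift.common.client_binary }} get nodes
        --config={{ openshift.common.config_base }}/master/admin.kubeconfig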
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
new file mode 100644
index 000000000..26716a92d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/sanity_checks.yml
@@ -0,0 +1,51 @@
+---
+- name: Verify Requirements
+  hosts: oo_all_hosts
+  tasks:
+  - fail:
+      msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
+    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
+
+  - fail:
+      msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
+
+  - fail:
+      msg: Nuage sdn can not be used with flannel
+    when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+  - fail:
+      msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
+    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
+
+  - fail:
+      msg: Contiv can not be used with flannel
+    when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+  - fail:
+      msg: Contiv can not be used with nuage
+    when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+  - fail:
+      msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
+    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
+
+  - fail:
+      msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
+    when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
+
+  - fail:
+      msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
+    when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+  - fail:
+      msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
+    when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+  - fail:
+      msg: openshift_hostname must be 63 characters or less
+    when: openshift_hostname is defined and openshift_hostname | length > 63
+
+  - fail:
+      msg: openshift_public_hostname must be 63 characters or less
+    when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
new file mode 100644
index 000000000..599350258
--- /dev/null
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -0,0 +1,20 @@
+---
+
+- name: Update Master configs
+  hosts: oo_masters
+  serial: 1
+  tasks:
+  - block:
+    - include_role:
+        name: openshift_service_catalog
+        tasks_from: wire_aggregator
+      vars:
+        first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Service Catalog
+  hosts: oo_first_master
+  roles:
+  - openshift_service_catalog
+  - ansible_service_broker
+  vars:
+    first_master: "{{ groups.oo_first_master[0] }}"
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 6ed31a644..cef0072f3 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -7,10 +7,18 @@
  tags:
  - always

+- include: sanity_checks.yml
+  tags:
+  - always
+
- include: validate_hostnames.yml
  tags:
  - node

+- include: initialize_openshift_repos.yml
+  tags:
+  - always
+
- include: initialize_openshift_version.yml
  tags:
  - always
diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..eb118365a 100644
--- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
@@ -3,7 +3,7 @@

- name: Generate etcd instance names(s)
  set_fact:
-    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+    scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
  register: etcd_names_output
  with_sequence: count={{ num_etcd }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..783f70f50 100644
--- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
@@ -3,7 +3,7 @@

- name: Generate master instance names(s)
  set_fact:
-    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+    scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
  register: master_names_output
  with_sequence: count={{ num_masters }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..c103e40a9 100644
--- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
@@ -5,7 +5,7 @@

- name: Generate node instance names(s)
  set_fact:
-    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
+    scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
  register: node_names_output
  with_sequence: count={{ number_nodes }}
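The three launch-facts tasks above all build instance names with `'%05x' | format(1048576 | random)`; 1048576 is 16^5, so the random integer always formats to exactly five zero-padded hex digits. A standalone sketch:

    - set_fact:
        # e.g. default-node-0a3f1
        instance_name: "default-node-{{ '%05x' | format(1048576 | random) }}"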
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
deleted file mode 100644
index be956fca5..000000000
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Subscribe hosts, update repos and update OS packages
-  hosts: oo_hosts_to_update
-  roles:
-  # Explicitly calling openshift_facts because it appears that when
-  # rhel_subscribe is skipped that the openshift_facts dependency for
-  # openshift_repos is also skipped (this is the case at least for Ansible
-  # 2.0.2)
-  - openshift_facts
-  - role: rhel_subscribe
-    when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
-          ansible_distribution == "RedHat" and
-          lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
-          default('no', True) | lower in ['no', 'false']
-  - openshift_repos
-  - os_update_latest
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
deleted file mode 100644
index 9f7961614..000000000
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
-    skip_node_svc_handlers: True
-
-- name: Update systemd units
-  include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 07db071ce..98953f72e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -4,7 +4,6 @@ # Do not allow adding hosts during upgrade.
    g_new_master_hosts: []
    g_new_node_hosts: []
-    openshift_cluster_id: "{{ cluster_id | default('default') }}"

- include: ../initialize_nodes_to_upgrade.yml

@@ -52,11 +51,15 @@

  - name: Drain Node for Kubelet upgrade
    command: >
-      {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets
+      {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+    register: l_docker_upgrade_drain_result
+    until: not l_docker_upgrade_drain_result | failed
+    retries: 60
+    delay: 60

-  - include: upgrade.yml
+  - include: tasks/upgrade.yml
    when: l_docker_upgrade is defined and l_docker_upgrade | bool

  - name: Set node schedulability
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 1b418920f..83f16ac0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -1,6 +1,10 @@
---
- name: Restart docker
  service: name=docker state=restarted
+  register: l_docker_restart_docker_in_upgrade_result
+  until: not l_docker_restart_docker_in_upgrade_result | failed
+  retries: 3
+  delay: 30

- name: Update docker facts
  openshift_facts:
@@ -11,7 +15,6 @@
  with_items:
  - etcd_container
  - openvswitch
-  - "{{ openshift.common.service_type }}-master"
  - "{{ openshift.common.service_type }}-master-api"
  - "{{ openshift.common.service_type }}-master-controllers"
  - "{{ openshift.common.service_type }}-node"
@@ -24,4 +27,5 @@
    state: started
    delay: 10
    port: "{{ openshift.master.api_port }}"
+    timeout: 600
  when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 17f8fc6e9..808cc562c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,7 +4,6 @@
- name: Stop containerized services
  service: name={{ item }} state=stopped
  with_items:
-  - "{{ openshift.common.service_type }}-master"
  - "{{ openshift.common.service_type }}-master-api"
  - "{{ openshift.common.service_type }}-master-controllers"
  - "{{ openshift.common.service_type }}-node"
@@ -32,7 +31,13 @@
- debug: var=docker_image_count.stdout
  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool

-- service: name=docker state=stopped
+- service:
+    name: docker
+    state: stopped
+  register: l_pb_docker_upgrade_stop_result
+  until: not l_pb_docker_upgrade_stop_result | failed
+  retries: 3
+  delay: 30

- name: Upgrade Docker
  package: name=docker{{ '-' + docker_version }} state=present
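The Docker upgrade path now wraps restarts, stops, and drains in a register/until/retries loop so a transient failure is retried instead of aborting the upgrade. The bare idiom (retry counts illustrative):

    - name: Restart a service, retrying transient failures
      service:
        name: docker
        state: restarted
      register: l_restart_result
      until: not l_restart_result | failed
      retries: 3
      delay: 30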
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 616ba04f8..2cc6c9019 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -2,7 +2,7 @@
- name: Backup etcd
  hosts: oo_etcd_hosts_to_backup
  roles:
-  - role: openshift_facts
+  - role: openshift_etcd_facts
  - role: etcd_common
    r_etcd_common_action: backup
    r_etcd_common_backup_tag: etcd_backup_tag
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 0431c1ce0..39e82498d 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -36,7 +36,7 @@
  - not openshift.common.is_etcd_system_container | bool

- name: Record containerized etcd version (runc)
-  command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
+  command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
  register: etcd_container_version_runc
  failed_when: false
  # AUDIT:changed_when: `false` because we are only inspecting
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 0f421928b..c98065cf4 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -4,7 +4,6 @@ # Do not allow adding hosts during upgrade.
    g_new_master_hosts: []
    g_new_node_hosts: []
-    openshift_cluster_id: "{{ cluster_id | default('default') }}"

- include: ../initialize_oo_option_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 046535680..72de63070 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -6,27 +6,32 @@
  - lib_openshift

  tasks:
-  - name: Retrieve list of openshift nodes matching upgrade label
-    oc_obj:
-      state: list
-      kind: node
-      selector: "{{ openshift_upgrade_nodes_label }}"
-    register: nodes_to_upgrade
-    when: openshift_upgrade_nodes_label is defined
+  - when: openshift_upgrade_nodes_label is defined
+    block:
+    - name: Retrieve list of openshift nodes matching upgrade label
+      oc_obj:
+        state: list
+        kind: node
+        selector: "{{ openshift_upgrade_nodes_label }}"
+      register: nodes_to_upgrade

-  # We got a list of nodes with the label, now we need to match these with inventory hosts
-  # using their openshift.common.hostname fact.
-  - name: Map labelled nodes to inventory hosts
-    add_host:
-      name: "{{ item }}"
-      groups: temp_nodes_to_upgrade
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_become: "{{ g_sudo | default(omit) }}"
-    with_items: " {{ groups['oo_nodes_to_config'] }}"
-    when:
-    - openshift_upgrade_nodes_label is defined
-    - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
-    changed_when: false
+    - name: Fail if no nodes match openshift_upgrade_nodes_label
+      fail:
+        msg: "openshift_upgrade_nodes_label was specified but no nodes matched"
+      when: nodes_to_upgrade.results.results[0]['items'] | length == 0
+
+    # We got a list of nodes with the label, now we need to match these with inventory hosts
+    # using their openshift.common.hostname fact.
+    - name: Map labelled nodes to inventory hosts
+      add_host:
+        name: "{{ item }}"
+        groups: temp_nodes_to_upgrade
+        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+        ansible_become: "{{ g_sudo | default(omit) }}"
+      with_items: " {{ groups['oo_nodes_to_config'] }}"
+      when:
+      - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+      changed_when: false

# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
# present, otherwise hit all nodes:
diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker
deleted file mode 120000
index 6aeca2842..000000000
--- a/playbooks/common/openshift-cluster/upgrades/master_docker
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles/openshift_master/templates/master_docker
\ No newline at end of file
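The reworked block above gates label-driven upgrades and now fails fast when the selector matches no nodes. A hedged invocation sketch (the playbook path and label value are illustrative):

    # ansible-playbook -i hosts playbooks/byo/openshift-cluster/upgrades/<version>/upgrade_nodes.yml \
    #   -e openshift_upgrade_nodes_label="region=infra"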
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 9d8b73cff..6d8503879 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -1,8 +1,10 @@
---
# Only check if docker upgrade is required if docker_upgrade is not
# already set to False.
-- include: ../docker/upgrade_check.yml
-  when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+- include: ../../docker/upgrade_check.yml
+  when:
+  - docker_upgrade is not defined or (docker_upgrade | bool)
+  - not (openshift.common.is_atomic | bool)

# Additional checks for Atomic hosts:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 06eb5f936..45022cd61 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -9,23 +9,16 @@
      local_facts:
        ha: "{{ groups.oo_masters_to_config | length > 1 }}"

-  - name: Ensure Master is running
-    service:
-      name: "{{ openshift.common.service_type }}-master"
-      state: started
-      enabled: yes
-    when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
-
  - name: Ensure HA Master is running
    service:
      name: "{{ openshift.common.service_type }}-master-api"
      state: started
      enabled: yes
-    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+    when: openshift.common.is_containerized | bool

  - name: Ensure HA Master is running
    service:
      name: "{{ openshift.common.service_type }}-master-controllers"
      state: started
      enabled: yes
-    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+    when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
new file mode 100644
index 000000000..497709d25
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -0,0 +1,13 @@
+---
+- name: Verify Host Requirements
+  hosts: oo_all_hosts
+  roles:
+  - openshift_health_checker
+  vars:
+  - r_openshift_health_checker_playbook_context: upgrade
+  post_tasks:
+  - action: openshift_health_check
+    args:
+      checks:
+      - disk_availability
+      - memory_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
index 9a959a959..4c345dbe8 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -9,6 +9,21 @@
      deployment types
    when: deployment_type not in ['origin','openshift-enterprise', 'online']

+  # osm_cluster_network_cidr, osm_host_subnet_length and openshift_portal_net are
+  # required when upgrading to avoid changes that may occur between releases
+  # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1451023
+  - assert:
+      that:
+      - "osm_cluster_network_cidr is defined"
+      - "osm_host_subnet_length is defined"
+      - "openshift_portal_net is defined"
+      msg: >
+        osm_cluster_network_cidr, osm_host_subnet_length, and openshift_portal_net are required inventory
+        variables when upgrading. These variables should match what is currently used in the cluster. If
+        you don't remember what these values are you can find them in /etc/origin/master/master-config.yaml
+        on a master with the names clusterNetworkCIDR (osm_cluster_network_cidr),
+        hostSubnetLength (osm_host_subnet_length), and serviceNetworkCIDR (openshift_portal_net).
+
# Error out in situations where the user has older versions specified in their
# inventory in any of the openshift_release, openshift_image_tag, and
# openshift_pkg_version variables. These must be removed or updated to proceed
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
deleted file mode 100644
index 354af3cde..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Verify node processes
-  hosts: oo_nodes_to_config
-  roles:
-  - openshift_facts
-  - openshift_docker_facts
-  tasks:
-  - name: Ensure Node is running
-    service:
-      name: "{{ openshift.common.service_type }}-node"
-      state: started
-      enabled: yes
-    when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index b980909eb..b75aae589 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,6 +3,22 @@
###############################################################################
# Upgrade Masters
###############################################################################
+
+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade all storage
+  hosts: oo_first_master
+  tasks:
+  - name: Upgrade all storage
+    command: >
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      migrate storage --include=* --confirm
+    register: l_pb_upgrade_control_plane_pre_upgrade_storage
+    when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+    failed_when:
+    - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+    - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+    - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool

# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
# so we must first make sure this is set correctly before attempting the backup.
@@ -75,7 +91,7 @@
  - include_vars: ../../../../roles/openshift_master/vars/main.yml

-  - name: Update systemd units
+  - name: Remove any legacy systemd units and update systemd units
    include: ../../../../roles/openshift_master/tasks/systemd_units.yml

  - name: Check for ca-bundle.crt
@@ -130,6 +146,19 @@
  - include: "{{ openshift_master_upgrade_post_hook }}"
    when: openshift_master_upgrade_post_hook is defined

+  - name: Post master upgrade - Upgrade clusterpolicies storage
+    command: >
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      migrate storage --include=clusterpolicies --confirm
+    register: l_pb_upgrade_control_plane_post_upgrade_storage
+    when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+    failed_when:
+    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+    - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+    - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
+    run_once: true
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+
  - set_fact:
      master_update_complete: True

@@ -205,13 +234,25 @@
  - name: Reconcile Security Context Constraints
    command: >
-      {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name
+      {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
    register: reconcile_scc_result
    changed_when:
    - reconcile_scc_result.stdout != ''
    - reconcile_scc_result.rc == 0
    run_once: true

+  - name: Migrate storage post policy reconciliation
+    command: >
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      migrate storage --include=* --confirm
+    run_once: true
+    register: l_pb_upgrade_control_plane_post_upgrade_storage
+    when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+    failed_when:
+    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+    - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+    - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
+
  - set_fact:
      reconcile_complete: True

@@ -240,7 +281,7 @@
  roles:
  - openshift_facts
  tasks:
-  - include: docker/upgrade.yml
+  - include: docker/tasks/upgrade.yml
    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool

- name: Drain and upgrade master nodes
@@ -270,13 +311,18 @@

  - name: Drain Node for Kubelet upgrade
    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
    delegate_to: "{{ groups.oo_first_master.0 }}"
+    register: l_upgrade_control_plane_drain_result
+    until: not l_upgrade_control_plane_drain_result | failed
+    retries: 60
+    delay: 60

  roles:
  - lib_openshift
  - openshift_facts
  - docker
+  - openshift_node_dnsmasq
  - openshift_node_upgrade

  post_tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 91dbc2cd4..c93a5d89c 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,13 +26,18 @@

  - name: Drain Node for Kubelet upgrade
    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
    delegate_to: "{{ groups.oo_first_master.0 }}"
+    register: l_upgrade_nodes_drain_result
+    until: not l_upgrade_nodes_drain_result | failed
+    retries: 60
+    delay: 60

  roles:
  - lib_openshift
  - openshift_facts
  - docker
+  - openshift_node_dnsmasq
  - openshift_node_upgrade
  - role: openshift_excluder
    r_openshift_excluder_action: enable
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
index 83d2cec81..8558bf3e9 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -74,18 +74,21 @@
  - block:
    - debug:
        msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
-      when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
-                openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}"
+      when:
+      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+      - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]

    - set_fact:
        openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
-      when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
-                openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}"
+      when:
+      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+      - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]

    - set_fact:
        openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
-      when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
-                openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
+      when:
+      - openshift_master_scheduler_current_predicates != default_predicates_no_region
+      - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]

    when: openshift_master_scheduler_predicates | default(none) is none
@@ -131,18 +134,21 @@
  - block:
    - debug:
        msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
-      when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
-                openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}"
+      when:
+      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+      - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]

    - set_fact:
        openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
-      when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
-                openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}"
+      when:
+      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+      - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]

    - set_fact:
        openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
-      when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
-                openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
+      when:
+      - openshift_master_scheduler_current_priorities != default_priorities_no_zone
+      - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]

    when: openshift_master_scheduler_priorities | default(none) is none
@@ -162,5 +168,6 @@
      content: "{{ scheduler_config | to_nice_json }}"
      dest: "{{ openshift_master_scheduler_conf }}"
      backup: true
-  when: "{{ openshift_upgrade_scheduler_predicates is defined or
-            openshift_upgrade_scheduler_priorities is defined }}"
+  when: >
+    openshift_upgrade_scheduler_predicates is defined or
+    openshift_upgrade_scheduler_priorities is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
index f1245aa2e..a241ef039 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -39,8 +39,9 @@
      | union(groups['oo_etcd_to_config'] | default([])))
      | oo_collect('openshift.common.hostname') | default([]) | join (',')
      }}"
-  when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
-            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+  when:
+  - openshift_http_proxy is defined or openshift_https_proxy is defined
+  - openshift_generate_no_proxy_hosts | default(True) | bool

- include: ../pre/verify_inventory_vars.yml
  tags:
@@ -88,7 +89,7 @@
- name: Verify docker upgrade targets
  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
  tasks:
-  - include: ../pre/verify_docker_upgrade_targets.yml
+  - include: ../pre/tasks/verify_docker_upgrade_targets.yml
    tags:
    - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index b693ab55c..54c85f0fb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -47,8 +47,9 @@
      | union(groups['oo_etcd_to_config'] | default([])))
      | oo_collect('openshift.common.hostname') | default([]) | join (',')
      }}"
-  when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
-            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+  when:
+  - openshift_http_proxy is defined or openshift_https_proxy is defined
+  - openshift_generate_no_proxy_hosts | default(True) | bool

- include: ../pre/verify_inventory_vars.yml
  tags:
@@ -92,7 +93,7 @@
- name: Verify docker upgrade targets
  hosts: oo_masters_to_config:oo_etcd_to_config
  tasks:
-  - include: ../pre/verify_docker_upgrade_targets.yml
+  - include: ../pre/tasks/verify_docker_upgrade_targets.yml
    tags:
    - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 4fd029107..cee4e9087 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -40,8 +40,9 @@
      | union(groups['oo_etcd_to_config'] | default([])))
      | oo_collect('openshift.common.hostname') | default([]) | join (',')
      }}"
-  when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
-            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+  when:
+  - openshift_http_proxy is defined or openshift_https_proxy is defined
+  - openshift_generate_no_proxy_hosts | default(True) | bool

- include: ../pre/verify_inventory_vars.yml
  tags:
@@ -89,7 +90,7 @@
- name: Verify docker upgrade targets
  hosts: oo_nodes_to_upgrade
  tasks:
-  - include: ../pre/verify_docker_upgrade_targets.yml
+  - include: ../pre/tasks/verify_docker_upgrade_targets.yml
    tags:
    - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
index 965e39482..ae217ba2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -39,8 +39,9 @@
      | union(groups['oo_etcd_to_config'] | default([])))
      | oo_collect('openshift.common.hostname') | default([]) | join (',')
      }}"
-  when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
-            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+  when:
+  - openshift_http_proxy is defined or openshift_https_proxy is defined
+  - openshift_generate_no_proxy_hosts | default(True) | bool

- include: ../pre/verify_inventory_vars.yml
  tags:
@@ -88,7 +89,7 @@
- name: Verify docker upgrade targets
  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
  tasks:
-  - include: ../pre/verify_docker_upgrade_targets.yml
+  - include: ../pre/tasks/verify_docker_upgrade_targets.yml
    tags:
    - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 7830f462c..d7cb38d03 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -47,8 +47,9 @@
      | union(groups['oo_etcd_to_config'] | default([])))
      | oo_collect('openshift.common.hostname') | default([]) | join (',')
      }}"
-  when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
-            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+  when:
+  - openshift_http_proxy is defined or openshift_https_proxy is defined
+  - openshift_generate_no_proxy_hosts | default(True) | bool

- include: ../pre/verify_inventory_vars.yml
  tags:
@@ -92,7 +93,7 @@
- name: Verify docker upgrade targets
  hosts: oo_masters_to_config:oo_etcd_to_config
  tasks:
-  - include: ../pre/verify_docker_upgrade_targets.yml
+  - include: ../pre/tasks/verify_docker_upgrade_targets.yml
    tags:
    - pre_upgrade
index 4364ff8e3..8531e6045 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -40,8 +40,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -89,7 +90,7 @@ - name: Verify docker upgrade targets hosts: oo_nodes_to_upgrade tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. - skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml index e63b03e51..a3d0d6305 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -39,8 +39,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -88,7 +89,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade @@ -115,5 +116,3 @@ - include: ../upgrade_nodes.yml - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 74c2964aa..5fee56615 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -47,8 +47,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ 
(openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -92,7 +93,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade @@ -119,5 +120,3 @@ master_config_hook: "v3_5/master_config_upgrade.yml" - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml index 036d3fcf5..e29d0f8e6 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -40,8 +40,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -89,7 +90,7 @@ - name: Verify docker upgrade targets hosts: oo_nodes_to_upgrade tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
- skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 5d41b84d0..51acd17da 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -39,8 +39,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -70,6 +71,10 @@ # docker is configured and running. skip_docker_role: True +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade @@ -88,7 +93,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade @@ -115,5 +120,3 @@ - include: ../upgrade_nodes.yml - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index a66fb51ff..9fe059ac9 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -47,8 +47,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -92,7 +93,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade @@ -119,5 +120,3 @@ master_config_hook: "v3_6/master_config_upgrade.yml" - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 25eceaf90..1b10d4e37 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -40,8 +40,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or 
openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -89,7 +90,7 @@ - name: Verify docker upgrade targets hosts: oo_nodes_to_upgrade tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins new file mode 120000 index 000000000..7de3c1dd7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/roles b/playbooks/common/openshift-cluster/upgrades/v3_7/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml new file mode 100644 index 000000000..9ec40723a --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -0,0 +1,122 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml new file mode 100644 index 000000000..f97f34c3b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -0,0 +1,122 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml new file mode 100644 index 000000000..e95b90cd5 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -0,0 +1,111 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml new file mode 100644 index 000000000..f76fc68d1 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -0,0 +1,23 @@ +--- +############################################################################### +# Pre-upgrade checks for known data problems. If this playbook fails, you +# should contact support. If you're not supported, contact users@lists.openshift.com +############################################################################### +- name: Verify 3.7 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + + tasks: + - name: Check for invalid namespaces and SDN errors + oc_objectvalidator: + + - name: Confirm OpenShift authorization objects are in sync + command: > + {{ openshift.common.client_binary }} adm migrate authorization + when: not openshift.common.version_gte_3_7 | bool + changed_when: false + register: l_oc_result + until: l_oc_result.rc == 0 + retries: 4 + delay: 15 diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml index 33fc5630f..be2e6a15a 100644 --- a/playbooks/common/openshift-cluster/validate_hostnames.yml +++ b/playbooks/common/openshift-cluster/validate_hostnames.yml @@ -1,17 +1,22 @@ --- -- name: Gather and set facts for node hosts +- name: Validate node hostnames hosts: oo_nodes_to_config - roles: - - openshift_facts tasks: - - shell: + - name: Query DNS for IP address of {{ openshift.common.hostname }} + shell: getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }' register: lookupip changed_when: false failed_when: false - name: Warn user about bad openshift_hostname values pause: - prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ prompt: + The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }} + doesn't resolve to an IP address owned by this host. Please set + openshift_hostname variable to a hostname that when resolved on the host + in question resolves to an IP address matching an interface on this + host. This host will fail liveness checks for pods utilizing hostPorts, + press ENTER to continue or CTRL-C to abort. seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}" when: - lookupip.stdout != '127.0.0.1' diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 2cb6197d1..f2b85eea1 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -3,6 +3,7 @@ hosts: oo_etcd_to_config any_errors_fatal: true roles: + - role: os_firewall - role: openshift_etcd etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml new file mode 100644 index 000000000..e4ab0aa41 --- /dev/null +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -0,0 +1,147 @@ +--- +- name: Run pre-checks + hosts: oo_etcd_to_migrate + roles: + - role: etcd_migrate + r_etcd_migrate_action: check + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ ansible_default_ipv4.address }}" + +# TODO: This will be different for release-3.6 branch +- name: Prepare masters for etcd data migration + hosts: oo_masters_to_config + tasks: + - set_fact: + master_services: + - "{{ openshift.common.service_type + '-master-controllers' }}" + - "{{ openshift.common.service_type + '-master-api' }}" + - debug: + msg: "master service name: {{ master_services }}" + - name: Stop masters + service: + name: "{{ item }}" + state: stopped + with_items: "{{ master_services }}" + +- name: Backup v2 data + hosts: oo_etcd_to_migrate + gather_facts: no + roles: + - role: openshift_facts + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migration + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_migrate) + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}" + - fail: + msg: "Migration cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: + - etcd_backup_failed | length > 0 + +- name: Stop etcd + hosts: oo_etcd_to_migrate + gather_facts: no + pre_tasks: + - set_fact: + l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}" + - name: Disable etcd members + service: + name: "{{ l_etcd_service }}" + state: stopped + +- name: Migrate data on first etcd + hosts: oo_etcd_to_migrate[0] + gather_facts: no + roles: + - role: etcd_migrate + r_etcd_migrate_action: migrate + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + +- name: Clean data stores on remaining etcd hosts + hosts: oo_etcd_to_migrate[1:] + gather_facts: no + roles: + - role: etcd_migrate + r_etcd_migrate_action: clean_data + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + post_tasks: + - name: Add etcd hosts + delegate_to: localhost + add_host: + name: "{{ item }}" + groups: oo_new_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}" + changed_when: no + - name: Set success + set_fact: + r_etcd_migrate_success: true + +- include: ./scaleup.yml + +- name: Gate on etcd migration + hosts: oo_masters_to_config + gather_facts: no + tasks: + - set_fact: + etcd_migration_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_migrate) + | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}" + - set_fact: + etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}" + +- name: Add TTLs on the first master + hosts: oo_first_master[0] + roles: + - role: etcd_migrate + r_etcd_migrate_action: add_ttls + etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + when: etcd_migration_failed | length == 0 + +- name: Configure masters if etcd data migration is successful + hosts: oo_masters_to_config + roles: + - role: etcd_migrate + r_etcd_migrate_action: configure + when: etcd_migration_failed | length == 0 + tasks: + - debug: + msg: "Skipping master re-configuration since migration failed." + when: + - etcd_migration_failed | length > 0 + - name: Start master services + service: + name: "{{ item }}" + state: started + register: service_status + # Sometimes the master-api or master-controllers service fails to start the first time + until: service_status.state is defined and service_status.state == "started" + retries: 5 + delay: 10 + with_items: "{{ master_services[::-1] }}" + - fail: + msg: "Migration failed. 
The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}" + when: + - etcd_migration_failed | length > 0 diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml index 196c86f28..af1ef245a 100644 --- a/playbooks/common/openshift-etcd/restart.yml +++ b/playbooks/common/openshift-etcd/restart.yml @@ -5,5 +5,5 @@ tasks: - name: restart etcd service: - name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}" + name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" state: restarted diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml new file mode 100644 index 000000000..d3fa48bad --- /dev/null +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -0,0 +1,74 @@ +--- +- name: Gather facts + hosts: oo_etcd_to_config:oo_new_etcd_to_config + roles: + - openshift_etcd_facts + post_tasks: + - set_fact: + etcd_hostname: "{{ etcd_hostname }}" + etcd_ip: "{{ etcd_ip }}" + +- name: Configure etcd + hosts: oo_new_etcd_to_config + serial: 1 + any_errors_fatal: true + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + pre_tasks: + - name: Add new etcd members to cluster + command: > + /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} + --key-file {{ etcd_peer_key_file }} + --ca-file {{ etcd_peer_ca_file }} + -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} + member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }} + delegate_to: "{{ etcd_ca_host }}" + failed_when: + - etcd_add_check.rc == 1 + - ("peerURL exists" not in etcd_add_check.stderr) + register: etcd_add_check + retries: 3 + delay: 10 + until: etcd_add_check.rc == 0 + roles: + - role: os_firewall + when: etcd_add_check.rc == 0 + - role: openshift_etcd + when: etcd_add_check.rc == 0 + etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_initial_cluster_state: "existing" + initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}" + etcd_ca_setup: False + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + - role: nickhammond.logrotate + when: etcd_add_check.rc == 0 + post_tasks: + - name: Verify cluster is stable + command: > + /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} + --key-file {{ etcd_peer_key_file }} + --ca-file {{ etcd_peer_ca_file }} + -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} + cluster-health + register: scaleup_health + retries: 3 + delay: 30 + until: scaleup_health.rc == 0 + +- name: Update master etcd client urls + hosts: oo_masters_to_config + serial: 1 + tasks: + - include_role: + name: openshift_master + tasks_from: update_etcd_client_urls + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + openshift_master_etcd_hosts: "{{ hostvars + | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'])) + | oo_collect('openshift.common.hostname') + | default(none, true) }}" + openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" 
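Aside on the scaleup play above: initial_etcd_cluster is scraped from the stdout of etcdctl member add, whose fourth line carries the ETCD_INITIAL_CLUSTER assignment for the new member. A minimal, self-contained sketch of that parsing, runnable against localhost; the member names, ID, and URLs below are illustrative, not taken from this change:

---
# Sketch: how stdout_lines[3] of `etcdctl member add` becomes the
# initial_etcd_cluster string used in the scaleup play above.
- name: Illustrate initial_etcd_cluster parsing
  hosts: localhost
  connection: local
  gather_facts: no
  vars:
    # Hypothetical captured output of the `member add` command
    etcd_add_check:
      stdout_lines:
        - 'Added member named etcd2 with ID 8e9e05c52164694d to cluster'
        - ''
        - 'ETCD_NAME="etcd2"'
        - 'ETCD_INITIAL_CLUSTER="etcd1=https://10.0.0.1:2380,etcd2=https://10.0.0.2:2380"'
  tasks:
    - name: Show the derived initial cluster string
      debug:
        msg: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"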
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml deleted file mode 100644 index ced4bddc5..000000000 --- a/playbooks/common/openshift-etcd/service.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate g_service_masters host group if needed - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_etcd - add_host: - name: "{{ item }}" - groups: g_service_etcd - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change etcd state on etcd instance(s) - hosts: g_service_etcd - connection: ssh - gather_facts: no - tasks: - - service: name=etcd state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 1efdfb336..d9de578f3 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -1,23 +1,26 @@ --- -- name: Open firewall ports for GlusterFS - hosts: oo_glusterfs_to_config - vars: - os_firewall_allow: - - service: glusterfs_sshd - port: "2222/tcp" - - service: glusterfs_daemon - port: "24007/tcp" - - service: glusterfs_management - port: "24008/tcp" - - service: glusterfs_bricks - port: "49152-49251/tcp" - roles: - - role: os_firewall +- name: Open firewall ports for GlusterFS nodes + hosts: glusterfs + tasks: + - include_role: + name: openshift_storage_glusterfs + tasks_from: firewall.yml when: - - openshift_storage_glusterfs_is_native | default(True) + - openshift_storage_glusterfs_is_native | default(True) | bool + +- name: Open firewall ports for GlusterFS registry nodes + hosts: glusterfs_registry + tasks: + - include_role: + name: openshift_storage_glusterfs + tasks_from: firewall.yml + when: + - openshift_storage_glusterfs_registry_is_native | default(True) | bool - name: Configure GlusterFS hosts: oo_first_master - roles: - - role: openshift_storage_glusterfs + tasks: + - name: setup glusterfs + include_role: + name: openshift_storage_glusterfs when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml index c414913bf..09ed81a83 100644 --- a/playbooks/common/openshift-loadbalancer/config.yml +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -12,5 +12,7 @@ openshift_use_nuage | default(false), nuage_mon_rest_server_port | default(none))) + openshift_loadbalancer_additional_backends | default([]) }}" + openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" roles: + - role: os_firewall - role: openshift_loadbalancer diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml deleted file mode 100644 index d3762c961..000000000 --- a/playbooks/common/openshift-loadbalancer/service.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate g_service_nodes host group if needed - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_lb - add_host: - name: "{{ item }}" - groups: g_service_lb - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on lb instance(s) 
- hosts: g_service_lb - connection: ssh - gather_facts: no - tasks: - - service: name=haproxy state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml index c0ea93d2c..7468c78f0 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml @@ -11,13 +11,13 @@ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - role: openshift_examples registry_url: "{{ openshift.master.registry_url }}" - when: openshift.common.install_examples | bool + when: openshift_install_examples | default(True) - role: openshift_hosted_templates registry_url: "{{ openshift.master.registry_url }}" - role: openshift_manageiq - when: openshift.common.use_manageiq | bool + when: openshift_use_manageiq | default(false) | bool - role: cockpit when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' ) - role: flannel_register - when: openshift.common.use_flannel | bool + when: openshift_use_flannel | default(false) | bool diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index ddc4db8f8..c77d7bb87 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -1,10 +1,31 @@ --- +- name: Disable excluders + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + - name: Gather and set facts for master hosts hosts: oo_masters_to_config vars: t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}" pre_tasks: + # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336 + # + # When scaling up a cluster upgraded from OCP <= 3.5, ensure that + # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing + # masters, or absent if such is the case. 
+ - name: Detect if this host is a new master in a scale up + set_fact: + g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}" + + - name: Scaleup Detection + debug: + var: g_openshift_master_is_scaleup + - name: Check for RPM generated config marker file .config_managed stat: path: /etc/origin/.config_managed @@ -69,7 +90,7 @@ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" -- name: Determine if session secrets must be generated +- name: Inspect state of first master config settings hosts: oo_first_master roles: - role: openshift_facts @@ -79,6 +100,60 @@ local_facts: session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" + - name: Check for existing configuration + stat: + path: /etc/origin/master/master-config.yaml + register: master_config_stat + + - name: Set clean install fact + set_fact: + l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" + + - name: Determine if etcd3 storage is in use + command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q + register: etcd3_grep + failed_when: false + changed_when: false + + - name: Set etcd3 fact + set_fact: + l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" + + - name: Check if atomic-openshift-master sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master + register: l_aom_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master + register: l_default_registry_defined + when: l_aom_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-api sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-api + register: l_aom_api_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api + register: l_default_registry_defined_api + when: l_aom_api_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-controllers sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-controllers + register: l_aom_controllers_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers + register: l_default_registry_defined_controllers + when: l_aom_controllers_exists.stat.exists | bool + + - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value + set_fact: + l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}" + l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}" + l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}" - name: Generate master session secrets hosts: oo_first_master @@ -109,7 +184,11 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" + openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | oo_collect('openshift.common.ip') | 
default([]) | join(',') + }}" roles: + - role: os_firewall - role: openshift_master openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_etcd_hosts: "{{ hostvars @@ -122,12 +201,26 @@ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" + r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" + r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" + openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}" + openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" + openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" + openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" - role: nuage_master - when: openshift.common.use_nuage | bool + when: openshift_use_nuage | default(false) | bool - role: calico_master - when: openshift.common.use_calico | bool + when: openshift_use_calico | default(false) | bool post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} changed_when: False + +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 6fec346c3..4d73b8124 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -7,7 +7,7 @@ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 handlers: - - include: roles/openshift_master/handlers/main.yml + - include: ../../../roles/openshift_master/handlers/main.yml static: yes roles: - openshift_facts diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml index 67ba0aa2e..a5dbe0590 100644 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -37,3 +37,4 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml index 508b5a3ac..4f8b758fd 100644 --- a/playbooks/common/openshift-master/restart_services.yml +++ b/playbooks/common/openshift-master/restart_services.yml @@ -1,9 +1,4 @@ --- -- name: Restart master - service: - name: "{{ openshift.common.service_type }}-master" - state: restarted - when: not openshift_master_ha | bool - name: Restart master API service: name: "{{ openshift.common.service_type }}-master-api" @@ -15,6 +10,7 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: openshift_master_ha | bool - name: Restart master controllers service: diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index bc61ee9bb..17f9ef4bc 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -1,11 +1,4 @@ --- -- include: 
../openshift-cluster/evaluate_groups.yml - -- name: Gather facts - hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config - roles: - - openshift_facts - - name: Update master count hosts: oo_masters:!oo_masters_to_config serial: 1 @@ -50,38 +43,8 @@ delay: 1 changed_when: false -- name: Configure docker hosts - hosts: oo_masters_to_config:oo_nodes_to_config - vars: - docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" - docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" - docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}" - roles: - - openshift_facts - - openshift_docker - -- name: Disable excluders - hosts: oo_masters_to_config - tags: - - always - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - - include: ../openshift-master/config.yml - include: ../openshift-loadbalancer/config.yml - include: ../openshift-node/config.yml - -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config - tags: - - always - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml deleted file mode 100644 index 48a2731aa..000000000 --- a/playbooks/common/openshift-master/service.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate g_service_masters host group if needed - hosts: localhost - gather_facts: no - connection: local - become: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_masters - add_host: - name: "{{ item }}" - groups: g_service_masters - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on master instance(s) - hosts: g_service_masters - connection: ssh - gather_facts: no - tasks: - - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml index 000e46e80..64ea0d3c4 100644 --- a/playbooks/common/openshift-nfs/config.yml +++ b/playbooks/common/openshift-nfs/config.yml @@ -2,5 +2,5 @@ - name: Configure nfs hosts: oo_nfs_to_config roles: - - role: openshift_facts + - role: os_firewall - role: openshift_storage_nfs diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml deleted file mode 100644 index b1e35e4b1..000000000 --- a/playbooks/common/openshift-nfs/service.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Populate g_service_nfs host group if needed - hosts: localhost - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_nfs - add_host: - name: "{{ item }}" - groups: g_service_nfs - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on nfs instance(s) - hosts: g_service_nfs - connection: ssh - gather_facts: no - tasks: - - service: name=nfs-server state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml 
index acebabc91..0801c41ff 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -1,24 +1,11 @@ --- -- name: Gather and set facts for node hosts +- name: Disable excluders hosts: oo_nodes_to_config - vars: - t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}" - pre_tasks: - - set_fact: - openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}" - when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != "" + gather_facts: no roles: - - openshift_facts - tasks: - # Since the master is generating the node certificates before they are - # configured, we need to make sure to set the node properties beforehand if - # we do not want the defaults - - openshift_facts: - role: node - local_facts: - labels: "{{ openshift_node_labels | default(None) }}" - annotations: "{{ openshift_node_annotations | default(None) }}" - schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Evaluate node groups hosts: localhost @@ -32,7 +19,11 @@ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ groups.oo_nodes_to_config | default([]) }}" - when: hostvars[item].openshift is defined and hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + when: + - hostvars[item].openshift is defined + - hostvars[item].openshift.common is defined + - hostvars[item].openshift.common.is_containerized | bool + - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) changed_when: False - name: Configure containerized nodes @@ -47,9 +38,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + roles: + - role: os_firewall - role: openshift_node openshift_ca_host: "{{ groups.oo_first_master.0 }}" @@ -64,9 +55,8 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" roles: + - role: os_firewall - role: openshift_node openshift_ca_host: "{{ groups.oo_first_master.0 }}" @@ -81,18 +71,27 @@ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" - when: openshift.common.use_flannel | bool + when: openshift_use_flannel | default(false) | bool - role: calico - when: openshift.common.use_calico | bool + when: openshift_use_calico | default(false) | bool - role: nuage_node - when: openshift.common.use_nuage | bool + when: openshift_use_nuage | default(false) | bool - role: contiv contiv_role: netplugin - when: openshift.common.use_contiv | bool + when: openshift_use_contiv | default(false) | bool - role: nickhammond.logrotate - role: openshift_manage_node openshift_master_host: "{{ groups.oo_first_master.0 }}" + when: not openshift_node_bootstrap | default(False) 
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
index 0014a5dbd..b3a7399dc 100644
--- a/playbooks/common/openshift-node/network_manager.yml
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -1,4 +1,6 @@
 ---
+- include: ../openshift-cluster/evaluate_groups.yml
+
 - name: Install and configure NetworkManager
   hosts: oo_all_hosts
   become: yes
diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml
index 01cf948e0..c3beb59b7 100644
--- a/playbooks/common/openshift-node/restart.yml
+++ b/playbooks/common/openshift-node/restart.yml
@@ -11,6 +11,10 @@
     service:
       name: docker
       state: restarted
+    register: l_docker_restart_docker_in_node_result
+    until: not l_docker_restart_docker_in_node_result | failed
+    retries: 3
+    delay: 30
 
   - name: Update docker facts
     openshift_facts:
@@ -23,7 +27,6 @@
     with_items:
     - etcd_container
     - openvswitch
-    - "{{ openshift.common.service_type }}-master"
     - "{{ openshift.common.service_type }}-master-api"
     - "{{ openshift.common.service_type }}-master-controllers"
     - "{{ openshift.common.service_type }}-node"
@@ -36,6 +39,7 @@
       state: started
       delay: 10
       port: "{{ openshift.master.api_port }}"
+      timeout: 600
     when: inventory_hostname in groups.oo_masters_to_config
 
 - name: restart node
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
deleted file mode 100644
index 40da8990d..000000000
--- a/playbooks/common/openshift-node/scaleup.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
-  hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
-  roles:
-  - openshift_facts
-
-- name: Gather and set facts for first master
-  hosts: oo_first_master
-  vars:
-    openshift_master_count: "{{ groups.oo_masters | length }}"
-  pre_tasks:
-  - set_fact:
-      openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
-    when: openshift_master_default_subdomain is not defined
-  roles:
-  - openshift_master_facts
-
-- name: Configure docker hosts
-  hosts: oo_nodes_to_config
-  vars:
-    docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
-    docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
-    docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
-  roles:
-  - openshift_facts
-  - openshift_docker
-
-- name: Disable excluders
-  hosts: oo_nodes_to_config
-  tags:
-  - always
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: disable
-    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- include: ../openshift-node/config.yml
-
-- name: Re-enable excluder if it was previously enabled
-  hosts: oo_nodes_to_config
-  tags:
-  - always
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: enable
-    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
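Editor's note: the `register`/`until`/`retries`/`delay` block added to restart.yml is Ansible's standard retry loop; it re-runs a task that may fail transiently (such as bouncing docker) instead of aborting on the first error. A standalone sketch of the same pattern, using the `| failed` filter style this era of the repo relies on (play name and register variable are illustrative):

```yaml
- name: Restart a flaky service with retries
  hosts: oo_nodes_to_config
  gather_facts: no
  tasks:
  - name: Restart docker
    service:
      name: docker
      state: restarted
    register: l_restart_result
    # Re-run up to 3 more times, 30 seconds apart, until the restart succeeds.
    until: not l_restart_result | failed
    retries: 3
    delay: 30
```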
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
deleted file mode 100644
index 130a5416f..000000000
--- a/playbooks/common/openshift-node/service.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - fail: msg="new_cluster_state is required to be injected in this playbook"
-    when: new_cluster_state is not defined
-
-  - name: Evaluate g_service_nodes
-    add_host:
-      name: "{{ item }}"
-      groups: g_service_nodes
-    with_items: "{{ oo_host_group_exp | default([]) }}"
-    changed_when: False
-
-- name: Change state on node instance(s)
-  hosts: g_service_nodes
-  connection: ssh
-  gather_facts: no
-  tasks:
-  - name: Change state on node instance(s)
-    service:
-      name: "{{ service_type }}-node"
-      state: "{{ new_cluster_state }}"
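Editor's note: with all three service.yml playbooks removed, the tree no longer ships an entry point for bulk service state changes. A hedged sketch of an equivalent standalone play, assuming a static `nodes` inventory group and the same caller-supplied `service_type` and `new_cluster_state` variables the deleted playbook expected:

```yaml
---
# Hypothetical replacement for the deleted openshift-node/service.yml.
# Invoke with the variables the old playbook required, e.g.:
#   ansible-playbook node-service.yml -e service_type=origin -e new_cluster_state=restarted
- name: Change state on node instance(s)
  hosts: nodes
  gather_facts: no
  tasks:
  - name: Apply the requested state to the node service
    service:
      name: "{{ service_type }}-node"
      state: "{{ new_cluster_state }}"
```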