Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/openshift-cluster/config.yml                                3
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml                 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml                        17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml   4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml       53
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml               13
-rw-r--r--  playbooks/common/openshift-master/config.yml                                25
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml                         17
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml                       6
-rw-r--r--  playbooks/common/openshift-master/validate_restart.yml (renamed from playbooks/common/openshift-master/restart.yml)  13
10 files changed, 83 insertions, 72 deletions
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 0f226f5f9..a95cb68b7 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -38,6 +38,9 @@
- set_fact:
openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
when: openshift_docker_log_options is not defined
+ - set_fact:
+ openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}"
+ when: openshift_docker_selinux_enabled is not defined
- include: ../openshift-etcd/config.yml
tags:
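
For context (not part of this commit): the set_fact/when pairs above only seed a fact when the inventory leaves it undefined. A minimal standalone sketch of that defaulting pattern, using a hypothetical docker_selinux_enabled_default fallback instead of the repository's oo_option lookup:

---
# Sketch: set a fact only if the inventory does not already define it.
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - set_fact:
        openshift_docker_selinux_enabled: "{{ docker_selinux_enabled_default | default(true) }}"
      when: openshift_docker_selinux_enabled is not defined
    - debug:
        var: openshift_docker_selinux_enabled
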
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 6e3e04a6b..2383836d4 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -108,10 +108,6 @@
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | default([]))
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
openshift_certificates_redeploy: true
- role: openshift_etcd_client_certificates
etcd_certificates_redeploy: true
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8cac2fb3b..76645ff3f 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,5 +1,6 @@
---
-- hosts: localhost
+- name: Create l_oo_all_hosts group
+ hosts: localhost
connection: local
become: no
gather_facts: no
@@ -10,7 +11,8 @@
groups: l_oo_all_hosts
with_items: "{{ g_all_hosts | default([]) }}"
-- hosts: l_oo_all_hosts
+- name: Include g_*_hosts vars for hosts in group l_oo_all_hosts
+ hosts: l_oo_all_hosts
gather_facts: no
tasks:
- include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
@@ -46,3 +48,14 @@
when: openshift_docker_log_options is not defined
- include: ../initialize_facts.yml
+
+- name: Ensure clean repo cache in the event repos have been changed manually
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+ args:
+ warn: no
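
For context (not part of this commit): the newly named first play builds the l_oo_all_hosts group at runtime with add_host so that later plays can target every host listed in g_all_hosts. A self-contained sketch of that pattern:

---
# Sketch: build a runtime group from a list variable, then target it.
- name: Create l_oo_all_hosts group
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
    - add_host:
        name: "{{ item }}"
        groups: l_oo_all_hosts
      with_items: "{{ g_all_hosts | default([]) }}"

- name: Run against every collected host
  hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
    - ping:
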
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 9632626a4..c83923dae 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -12,10 +12,6 @@
msg: Verify the correct version was found
when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
-
- set_fact:
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
when: not openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 6950b6166..77b37cdc2 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -30,14 +30,6 @@
- name: Upgrade and backup etcd
include: ./etcd/main.yml
-- name: Upgrade master packages
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - include: rpm_upgrade.yml component=master
- when: not openshift.common.is_containerized | bool
-
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master config hook for v3_3.
- name: Determine if service signer cert must be created
@@ -59,14 +51,30 @@
roles:
- openshift_master_facts
-- name: Upgrade master config and systemd units
+# The main master upgrade play. Should handle all changes to the system in one pass, with
+# support for optional hooks to be defined.
+- name: Upgrade master
hosts: oo_masters_to_config
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ serial: 1
handlers:
- include: ../../../../roles/openshift_master/handlers/main.yml
static: yes
roles:
- openshift_facts
post_tasks:
+
+ # Run the pre-upgrade hook if defined:
+ - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
+ when: openshift_master_upgrade_pre_hook is defined
+
+ - include: "{{ openshift_master_upgrade_pre_hook }}"
+ when: openshift_master_upgrade_pre_hook is defined
+
+ - include: rpm_upgrade.yml component=master
+ when: not openshift.common.is_containerized | bool
+
- include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
- include: upgrade_scheduler.yml
@@ -104,9 +112,26 @@
state: link
when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-- name: Set master update status to complete
- hosts: oo_masters_to_config
- tasks:
+ # Run the upgrade hook prior to restarting services/system if defined:
+ - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
+ when: openshift_master_upgrade_hook is defined
+
+ - include: "{{ openshift_master_upgrade_hook }}"
+ when: openshift_master_upgrade_hook is defined
+
+ - include: ../../openshift-master/restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+
+ - include: ../../openshift-master/restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+ # Run the post-upgrade hook if defined:
+ - debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
+ when: openshift_master_upgrade_post_hook is defined
+
+ - include: "{{ openshift_master_upgrade_post_hook }}"
+ when: openshift_master_upgrade_post_hook is defined
+
- set_fact:
master_update_complete: True
@@ -128,10 +153,6 @@
msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
when: master_update_failed | length > 0
-# We are now ready to restart master services (or entire system
-# depending on openshift_rolling_restart_mode):
-- include: ../../openshift-master/restart.yml
-
###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################
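
Illustrative usage (not part of this commit): the three hook variables introduced above are consumed via include, so each should point at a task file that runs on the master currently being upgraded (the play runs with serial: 1). Assuming hypothetical file paths:

# Inventory variables (hypothetical locations):
openshift_master_upgrade_pre_hook: /usr/share/custom/pre_master.yml
openshift_master_upgrade_hook: /usr/share/custom/master.yml
openshift_master_upgrade_post_hook: /usr/share/custom/post_master.yml

# /usr/share/custom/pre_master.yml -- a task file, executed on each master
# before its packages and config are upgraded:
---
- name: Note which master is about to be upgraded
  debug:
    msg: "Pre-upgrade hook running on {{ inventory_hostname }}"
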
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 86b344d7a..2bb460815 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -87,6 +87,19 @@
- name: Restart rpm node service
service: name="{{ openshift.common.service_type }}-node" state=restarted
when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
+
+ - name: Wait for node to be ready
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} --no-headers
+ register: node_output
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+ until: "{{ node_output.stdout.split()[1].startswith('Ready')}}"
+ # Give the node two minutes to come back online. Note that we pre-pull images now
+ # so containerized services should restart quickly as well.
+ retries: 24
+ delay: 5
+
- name: Set node schedulability
command: >
{{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
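
For context (not part of this commit): the readiness check above uses the standard register/until/retries polling pattern. A trimmed sketch, assuming a hypothetical target_node variable and a plain oc client on the first master; the until expression is written as a bare Jinja test, which later Ansible releases prefer over the braced form used above:

- name: Wait for node to be ready
  command: >
    oc get node {{ target_node | lower }} --no-headers
  register: node_output
  delegate_to: "{{ groups.oo_first_master.0 }}"
  until: node_output.stdout.split()[1].startswith('Ready')
  retries: 24
  delay: 5
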
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 39d64a126..7a334e771 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -75,17 +75,6 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_master_mktemp
- changed_when: False
-
- name: Determine if session secrets must be generated
hosts: oo_first_master
roles:
@@ -117,7 +106,6 @@
hosts: oo_masters_to_config
any_errors_fatal: true
vars:
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ openshift.master.ha }}"
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
@@ -134,10 +122,6 @@
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | default([]))
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
@@ -148,12 +132,3 @@
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file: name={{ g_master_mktemp.stdout }} state=absent
- changed_when: False
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index ffa23d26a..832301e3d 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -7,12 +7,19 @@
ignore_errors: true
become: yes
-# Ensure the api_port is available.
-- name: Wait for master API to come back online
- become: no
+- name: Wait for master to restart
local_action:
module: wait_for
- host="{{ openshift.common.hostname }}"
+ host="{{ inventory_hostname }}"
state=started
delay=10
- port="{{ openshift.master.api_port }}"
+ become: no
+
+# Now that ssh is back up we can wait for API on the remote system,
+# avoiding some potential connection issues from local system:
+- name: Wait for master API to come back online
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
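
For context (not part of this commit): the rewrite splits the post-reboot wait into two stages -- first confirm the rebooted master is reachable again from the control host, then probe the API port from the master itself to sidestep local connectivity quirks. A minimal sketch of that two-stage wait, with an illustrative ssh port (the playbook above waits without one):

- name: Wait for the master to accept connections again
  local_action:
    module: wait_for
    host: "{{ inventory_hostname }}"
    port: 22        # illustrative
    state: started
    delay: 10
  become: no

- name: Wait for the master API to come back online
  wait_for:
    host: "{{ openshift.common.hostname }}"
    port: "{{ openshift.master.api_port }}"
    state: started
    delay: 10
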
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index b40c32669..508b5a3ac 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -8,14 +8,14 @@
service:
name: "{{ openshift.common.service_type }}-master-api"
state: restarted
- when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+ when: openshift_master_ha | bool
- name: Wait for master API to come back online
wait_for:
host: "{{ openshift.common.hostname }}"
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
- when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+ when: openshift_master_ha | bool
- name: Restart master controllers
service:
name: "{{ openshift.common.service_type }}-master-controllers"
@@ -23,4 +23,4 @@
# Ignore errrors since it is possible that type != simple for
# pre-3.1.1 installations.
ignore_errors: true
- when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+ when: openshift_master_ha | bool
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/validate_restart.yml
index 7b340887a..5dbb21502 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/validate_restart.yml
@@ -1,6 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
- name: Validate configuration for rolling restart
hosts: oo_masters_to_config
roles:
@@ -65,14 +63,3 @@
- set_fact:
current_host: "{{ exists.stat.exists }}"
when: openshift.common.rolling_restart_mode == 'system'
-
-- name: Restart masters
- hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- serial: 1
- tasks:
- - include: restart_hosts.yml
- when: openshift.common.rolling_restart_mode == 'system'
- - include: restart_services.yml
- when: openshift.common.rolling_restart_mode == 'services'
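
Illustrative usage (not part of this commit): with the restart plays removed from the renamed validate_restart.yml, a caller that wants a standalone rolling restart must drive the restarts itself, mirroring what upgrade_control_plane.yml now does. A sketch, assuming evaluate_groups.yml has already been included:

---
- include: validate_restart.yml

- name: Restart masters
  hosts: oo_masters_to_config
  vars:
    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
  serial: 1
  tasks:
    - include: restart_hosts.yml
      when: openshift.common.rolling_restart_mode == 'system'
    - include: restart_services.yml
      when: openshift.common.rolling_restart_mode == 'services'
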