author     Devan Goodwin <dgoodwin@redhat.com>   2016-09-22 11:48:46 -0300
committer  Devan Goodwin <dgoodwin@redhat.com>   2016-09-29 10:26:01 -0300
commit     910e23336da02b8d1eec75276ce77ec269e2216c (patch)
tree       f692e1325d9641dd679ed50aa61ea493310b8f94 /playbooks/common/openshift-cluster
parent     0e7e7f6b45ace8a3a9516c8800e5cf84d7aa14fa (diff)
Skip the docker role in early upgrade stages.
This further improves the situation and prevents configuration changes from accidentally triggering docker restarts before we've evacuated nodes. In two places we now skip the role entirely, instead of the previous implementation, which only skipped upgrading the installed version and therefore did not catch configuration changes.
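
For context, a minimal sketch of how a role can be skipped wholesale via a play variable such as skip_docker_role (illustrative only; the guard inside the actual docker role is not part of this diff, and the included task file name is hypothetical):

# hypothetical roles/docker/tasks/main.yml
- name: Install, configure, and upgrade docker
  include: package_docker.yml   # hypothetical task file; skipped entirely when the flag is set
  when: not (skip_docker_role | default(False) | bool)

With skip_docker_role: True set in the play's vars, as in the control plane play below, every task behind that guard becomes a no-op, so no configuration change can restart the daemon at this stage.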
Diffstat (limited to 'playbooks/common/openshift-cluster')
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml         | 5
2 files changed, 6 insertions, 5 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 75ae92716..22d56f6d3 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -111,9 +111,9 @@
origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role,
- # it will be updated when we perform node upgrade.
- docker_protect_installed_version: True
+ # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
+ # restart.
+ skip_docker_role: True
tasks:
- name: Verifying the correct commandline tools are available
shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index a54349220..917c95e29 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,10 +7,11 @@
any_errors_fatal: true
roles:
- openshift_facts
+ - docker
handlers:
- include: ../../../../roles/openshift_node/handlers/main.yml
static: yes
- tasks:
+ pre_tasks:
# TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -37,7 +38,7 @@
{{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
-
+ tasks:
- include: docker/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
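
The switch from tasks: to pre_tasks: in upgrade_nodes.yml matters because Ansible runs a play's sections in a fixed order: pre_tasks, then roles, then tasks (then handlers). A sketch of the resulting play layout, with the hosts pattern and the evacuation task's name and module assumed, since they fall outside the visible hunks:

- hosts: oo_nodes_to_upgrade            # assumption; the play header is not shown in the hunk
  roles:
  - openshift_facts
  - docker                              # now runs only after pre_tasks have completed
  pre_tasks:
  - name: Evacuate node                 # task name and command module are assumptions
    command: >
      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: inventory_hostname in groups.oo_nodes_to_upgrade
  tasks:
  - include: docker/upgrade.yml
    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool

This ordering guarantees each node is evacuated before the docker role can restart the daemon, while the docker package upgrade itself still runs afterwards in tasks.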