-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml     |  8
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml              |  2
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml           |  8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml           |  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml          |  6
-rw-r--r--  roles/openshift_node/README.md                                         |  2
7 files changed, 26 insertions(+), 15 deletions(-)
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 0d451cf77..1e0a6d4e7 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -18,20 +18,20 @@
# If a node fails, halt everything: the admin will need to clean up, and we
# don't want to carry on and potentially take out every node. The playbook can
# safely be re-run and will take no action on a node already running the
# requested docker version.
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- - name: Prepare for Node evacuation
+ - name: Prepare for Node draining
command: >
{{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --drain --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
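The rename above is mechanical: each task still cordons the node and then clears it, only now with the `--drain` flag that newer clients use in place of `--evacuate`. A minimal standalone sketch of the same two-step flow, assuming the same group and fact names as the playbook:

```
- name: Drain a node from the first master
  hosts: oo_nodes_to_upgrade
  serial: 1
  any_errors_fatal: true
  tasks:
  # Cordon: stop the scheduler from placing new pods on the node.
  - name: Mark node unschedulable
    command: >
      {{ openshift.common.client_binary }} adm manage-node
      {{ openshift.node.nodename }} --schedulable=false
    delegate_to: "{{ groups.oo_first_master.0 }}"

  # Drain: force existing pods off so the kubelet/docker can be upgraded.
  - name: Drain node
    command: >
      {{ openshift.common.client_binary }} adm manage-node
      {{ openshift.node.nodename }} --drain --force
    delegate_to: "{{ groups.oo_first_master.0 }}"
```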
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 496b00697..d6115e7a5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -89,6 +89,8 @@
- include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
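The two added lines parameterize the shared control-plane playbook: the version-specific hook file is handed to the common include as a variable and consumed there later via `- include: "{{ master_config_hook }}"`. A minimal sketch of the pattern, with illustrative file names:

```
# The shared playbook (e.g. upgrade_control_plane.yml) runs whatever hook
# it is given:
#   - include: "{{ master_config_hook }}"

# Each version-specific entry point supplies its own hook:
- include: upgrade_control_plane.yml
  vars:
    master_config_hook: "v3_4/master_config_upgrade.yml"
```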
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5f008a045..5fc81bf3a 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -204,7 +204,7 @@
cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
-- name: Serially evacuate all nodes to trigger redeployments
+- name: Serially drain all nodes to trigger redeployments
hosts: oo_nodes_to_config
serial: 1
any_errors_fatal: true
@@ -222,7 +222,7 @@
was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
when: openshift_certificates_redeploy_ca | default(false) | bool
- - name: Prepare for node evacuation
+ - name: Prepare for node draining
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
@@ -230,11 +230,11 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
- - name: Evacuate node
+ - name: Drain node
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
- --evacuate --force
+ --drain --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
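The `was_schedulable` fact in the context above is parsed out of the node object before the drain so the node can be returned to its prior state afterwards. A sketch of that record/drain/restore cycle; the `oc get node` step is an assumption, since the diff only shows the registered `node_output` being parsed:

```
# Record whether the node was schedulable before touching it.
- name: Fetch node object
  command: >
    {{ openshift.common.client_binary }} get node {{ openshift.node.nodename }} -o json
  delegate_to: "{{ groups.oo_first_master.0 }}"
  register: node_output
  changed_when: false

- set_fact:
    was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"

# ... cordon, drain, redeploy certificates ...

# Only nodes that were schedulable to begin with are re-enabled.
- name: Restore schedulability
  command: >
    {{ openshift.common.client_binary }} adm manage-node
    {{ openshift.node.nodename }} --schedulable=true
  delegate_to: "{{ groups.oo_first_master.0 }}"
  when: was_schedulable | bool
```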
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 5ff9521ec..0f8d94737 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -75,7 +75,7 @@
hosts: etcd_hosts_to_upgrade
serial: 1
vars:
- upgrade_version: 3.0.14
+ upgrade_version: 3.0.15
tasks:
- include: containerized_tasks.yml
when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
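The bumped `upgrade_version` only applies when the guard passes: the `version_compare` filter routes hosts whose running etcd container predates 3.0 through the containerized task file. A sketch of how that guard evaluates, with illustrative values:

```
# default('99') makes a failed version detection compare as "new enough",
# so the containerized path is skipped rather than run by accident.
- debug:
    msg: "containerized upgrade path taken"
  when: "'2.3.7' | version_compare('3.0', '<')"    # 2.3.7 < 3.0 -> runs

- debug:
    msg: "containerized upgrade path skipped"
  when: "not ('99' | version_compare('3.0', '<'))" # 99 >= 3.0 -> runs
```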
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 474e6311e..6950b6166 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -51,6 +51,14 @@
- include: create_service_signer_cert.yml
+# Set openshift_master_facts separately. In order to reconcile
+# admission_configs, we currently must run openshift_master_facts and
+# then run openshift_facts.
+- name: Set OpenShift master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_master_facts
+
- name: Upgrade master config and systemd units
hosts: oo_masters_to_config
handlers:
@@ -58,8 +66,9 @@
static: yes
roles:
- openshift_facts
- - openshift_master_facts
- tasks:
+ post_tasks:
+ - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
+
- include: upgrade_scheduler.yml
- include: "{{ master_config_hook }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index cefc7d12b..68b111df4 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,5 +1,5 @@
---
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
@@ -39,9 +39,9 @@
retries: 3
delay: 1
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --drain --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
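As the comment in the hunk notes, the batch size is an invocation-time extra-var rather than inventory data, because `serial` is evaluated before per-host variables. A sketch of the play header and how it would be invoked; the variable name is an assumption based on that comment:

```
# serial accepts a fixed count or a percentage of the host group.
- name: Drain and upgrade nodes
  hosts: oo_nodes_to_upgrade
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  any_errors_fatal: true

# invoked as, e.g.:
#   ansible-playbook upgrade_nodes.yml -e openshift_upgrade_nodes_serial=20%
```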
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index d1920c485..616f44c1d 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -43,7 +43,7 @@ Currently we support re-labeling nodes but we don't re-schedule running pods nor
```
oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --evacuate ${NODE}
+oadm manage-node --drain ${NODE}
oadm manage-node --schedulable=true ${NODE}
```
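The same three README commands translate directly into tasks like those used throughout these playbooks; a minimal sketch, with `node_name` as a placeholder variable:

```
- name: Relabel workflow - cordon, drain, uncordon
  hosts: localhost
  vars:
    node_name: node1.example.com   # placeholder
  tasks:
  - command: oadm manage-node --schedulable=false {{ node_name }}
  - command: oadm manage-node --drain {{ node_name }}
  - command: oadm manage-node --schedulable=true {{ node_name }}
```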