| author    | Russell Teague <rteague@redhat.com> | 2017-02-08 09:26:51 -0500 |
|-----------|---|---|
| committer | Russell Teague <rteague@redhat.com> | 2017-02-13 14:15:49 -0500 |
| commit    | 8fd0bcfaa48b8fb62585dc96aa87741c58afe5cd (patch) | |
| tree      | e2aa9a1731f12b4b04bde68fc0be04094a7430eb /playbooks | |
| parent    | 671062646c9a409470be374408f104352af825f0 (diff) | |
| download  | openshift-8fd0bcfaa48b8fb62585dc96aa87741c58afe5cd.tar.gz (also .tar.bz2, .tar.xz, .zip) | |
Modify playbooks to use oadm_manage_node module
Diffstat (limited to 'playbooks')
3 files changed, 64 insertions(+), 57 deletions(-)
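All three playbooks get the same treatment: the shell-outs to `oc adm manage-node` are replaced by the `oadm_manage_node` module from the `lib_openshift` role, the separate probe for the node's prior schedulability is dropped in favor of `when: node_unschedulable|changed`, and the retry budget for transient API conflicts grows from 3 retries at 1s to 10 retries at 5s. The sketch below distills that pattern into a minimal standalone play; the play skeleton, host group, and placeholder upgrade step are illustrative, while the module invocations mirror the diff:

```yaml
# Illustrative standalone play showing the pattern this commit introduces.
# The host group and the upgrade placeholder are assumptions; the module
# calls, facts, and retry settings come straight from the diff below.
- hosts: oo_nodes_to_upgrade
  serial: 1
  roles:
  - lib_openshift            # provides the oadm_manage_node module
  tasks:
  - name: Mark node unschedulable
    oadm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: False
    delegate_to: "{{ groups.oo_first_master.0 }}"
    register: node_unschedulable
    until: node_unschedulable|succeeded   # retry transient API conflicts
    retries: 10
    delay: 5

  # ... drain the node and perform the upgrade work here ...

  - name: Set node schedulability
    oadm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: True
    delegate_to: "{{ groups.oo_first_master.0 }}"
    register: node_schedulable
    until: node_schedulable|succeeded
    retries: 10
    delay: 5
    # Only uncordon nodes this play actually cordoned, so a node that was
    # deliberately unschedulable before the upgrade stays unschedulable.
    when: node_unschedulable|changed
```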
```diff
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 13e1da961..5d3280328 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -22,12 +22,24 @@
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   serial: 1
   any_errors_fatal: true
+
+  roles:
+  - lib_openshift
+
   tasks:
-  - name: Prepare for Node draining
-    command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
+  - name: Mark node unschedulable
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+    retries: 10
+    delay: 5
+    register: node_unschedulable
+    until: node_unschedulable|succeeded
+    when:
+    - l_docker_upgrade is defined
+    - l_docker_upgrade | bool
+    - inventory_hostname in groups.oo_nodes_to_upgrade
 
   - name: Drain Node for Kubelet upgrade
     command: >
@@ -39,7 +51,12 @@
     when: l_docker_upgrade is defined and l_docker_upgrade | bool
 
   - name: Set node schedulability
-    command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: True
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
+    retries: 10
+    delay: 5
+    register: node_schedulable
+    until: node_schedulable|succeeded
+    when: node_unschedulable|changed
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index db2c27919..a4aefcdac 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -238,29 +238,22 @@
   any_errors_fatal: true
 
   pre_tasks:
+  - name: Load lib_openshift modules
+    include_role:
+      name: lib_openshift
+
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
-  - name: Determine if node is currently scheduleable
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
-    register: node_output
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    changed_when: false
-
-  - set_fact:
-      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
   - name: Mark node unschedulable
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    # NOTE: There is a transient "object has been modified" error here, allow a couple
-    # retries for a more reliable upgrade.
-    register: node_unsched
-    until: node_unsched.rc == 0
-    retries: 3
-    delay: 1
+    retries: 10
+    delay: 5
+    register: node_unschedulable
+    until: node_unschedulable|succeeded
 
   - name: Drain Node for Kubelet upgrade
     command: >
@@ -268,17 +261,19 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"
 
   roles:
+  - lib_openshift
   - openshift_facts
   - docker
   - openshift_node_upgrade
 
   post_tasks:
   - name: Set node schedulability
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: True
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: was_schedulable | bool
-    register: node_sched
-    until: node_sched.rc == 0
-    retries: 3
-    delay: 1
+    retries: 10
+    delay: 5
+    register: node_schedulable
+    until: node_schedulable|succeeded
+    when: node_unschedulable|changed
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e45b635f7..e3a98fd9b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,29 +7,22 @@
   any_errors_fatal: true
 
   pre_tasks:
+  - name: Load lib_openshift modules
+    include_role:
+      name: lib_openshift
+
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
-  - name: Determine if node is currently scheduleable
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
-    register: node_output
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    changed_when: false
-
-  - set_fact:
-      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
   - name: Mark node unschedulable
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    # NOTE: There is a transient "object has been modified" error here, allow a couple
-    # retries for a more reliable upgrade.
-    register: node_unsched
-    until: node_unsched.rc == 0
-    retries: 3
-    delay: 1
+    retries: 10
+    delay: 5
+    register: node_unschedulable
+    until: node_unschedulable|succeeded
 
   - name: Drain Node for Kubelet upgrade
     command: >
@@ -37,20 +30,22 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"
 
   roles:
+  - lib_openshift
   - openshift_facts
   - docker
   - openshift_node_upgrade
 
   post_tasks:
   - name: Set node schedulability
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: True
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: was_schedulable | bool
-    register: node_sched
-    until: node_sched.rc == 0
-    retries: 3
-    delay: 1
+    retries: 10
+    delay: 5
+    register: node_schedulable
+    until: node_schedulable|succeeded
+    when: node_unschedulable|changed
 
 - include: ../reset_excluder.yml
   tags:
```
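Because `oadm_manage_node` only reports `changed` when it actually cordons the node, the final task re-enables scheduling exclusively on nodes this run touched; the removed playbook code achieved the same effect with the `was_schedulable` probe and `set_fact`. For anyone adapting the pattern, a quick post-run check of the end state might look like the following; this task is not part of the commit, and the jsonpath query is an assumption based on the `spec.unschedulable` field the old probe inspected:

```yaml
# Illustrative ad-hoc verification task (not in this commit). Fails if the
# node is still cordoned after the play was expected to restore scheduling.
- name: Verify node is schedulable again
  command: >
    {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }}
    -o jsonpath='{.spec.unschedulable}'
  register: unschedulable_check
  delegate_to: "{{ groups.oo_first_master.0 }}"
  changed_when: false
  # An empty result means spec.unschedulable is unset, i.e. schedulable.
  failed_when: unschedulable_check.stdout == 'true'
```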