From 4bc761ecebf2617780327b2681b057c97486ae80 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 11 Jan 2016 14:49:35 -0500 Subject: Set portal net in master playbook --- playbooks/common/openshift-master/config.yml | 1 + 1 file changed, 1 insertion(+) (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 677c274c4..4ecdf2a0c 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -51,6 +51,7 @@ console_url: "{{ openshift_master_console_url | default(None) }}" console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" + portal_net: "{{ openshift_master_portal_net | default(None) }}" - name: Check status of external etcd certificatees stat: path: "{{ openshift.common.config_base }}/master/{{ item }}" -- cgit v1.2.3 From dea9abfe22864cf10d85d85370b1633ca18060b6 Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Mon, 14 Dec 2015 15:29:24 -0400 Subject: Implement simple master rolling restarts. Blocks running ansible on a host that will be restarted. Can restart just services, or optionally the full system. --- playbooks/common/openshift-cluster/restart.yml | 78 ++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 playbooks/common/openshift-cluster/restart.yml (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-cluster/restart.yml b/playbooks/common/openshift-cluster/restart.yml new file mode 100644 index 000000000..4117f7297 --- /dev/null +++ b/playbooks/common/openshift-cluster/restart.yml @@ -0,0 +1,78 @@ +--- +- include: evaluate_groups.yml +# TODO: verify this is an HA environment +# TODO: fork for pacemaker vs haproxy (based on?) + +- name: Validate configuration for rolling restart + hosts: oo_masters_to_config + tasks: + - set_fact: + openshift_rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" + - fail: + msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'" + when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"] + +# Creating a temp file on localhost, we then check each system that will +# be rebooted to see if that file exists, if so we know we're running +# ansible on a machine that needs a reboot, and we need to error out. +- name: Create temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - local_action: command mktemp + register: mktemp + changed_when: False + +- name: Check if temp file exists on any masters + hosts: oo_masters_to_config + tasks: + - stat: path="{{ hostvars.localhost.mktemp.stdout }}" + register: exists + +- name: Cleanup temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent + +- name: Fail if restarting the system where ansible is running + hosts: oo_masters_to_config + any_errors_fatal: true + tasks: + - fail: msg="Cannot run playbook on a host that will be restarted." 
+ when: exists.stat.exists + +- name: Restart Masters + hosts: oo_masters_to_config + serial: 1 + roles: + - openshift_facts + tasks: + - name: Restart master system + # https://github.com/ansible/ansible/issues/10616 + shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" + async: 1 + poll: 0 + ignore_errors: true + become: yes + when: openshift_rolling_restart_mode == 'system' + - name: Restart master services + service: + name: "{{ openshift.common.service_type }}-master-api" + state: restarted + # NOTE: no need to check openshift_master_ha here, we know it must be, + # thus the api service is the one we restart. + when: openshift_rolling_restart_mode == 'services' + + - name: Wait for master API to come back online + become: no + local_action: + module: wait_for + host="{{ inventory_hostname }}" + state=started + delay=10 + port=8443 # TODO: should this be made a master host variable? -- cgit v1.2.3 From f9aaa8ac13adf841823f35be594641bdc2ebecac Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Tue, 5 Jan 2016 11:39:22 -0500 Subject: Update rolling restart playbook for pacemaker support. Replace fail with a warn and prompt if running ansible from a host that will be rebooted. Re-organize playbooks. --- playbooks/common/openshift-cluster/restart.yml | 78 ------------- playbooks/common/openshift-master/restart.yml | 128 +++++++++++++++++++++ .../common/openshift-master/restart_hosts.yml | 28 +++++ .../openshift-master/restart_hosts_pacemaker.yml | 25 ++++ .../common/openshift-master/restart_services.yml | 27 +++++ .../restart_services_pacemaker.yml | 10 ++ 6 files changed, 218 insertions(+), 78 deletions(-) delete mode 100644 playbooks/common/openshift-cluster/restart.yml create mode 100644 playbooks/common/openshift-master/restart.yml create mode 100644 playbooks/common/openshift-master/restart_hosts.yml create mode 100644 playbooks/common/openshift-master/restart_hosts_pacemaker.yml create mode 100644 playbooks/common/openshift-master/restart_services.yml create mode 100644 playbooks/common/openshift-master/restart_services_pacemaker.yml (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-cluster/restart.yml b/playbooks/common/openshift-cluster/restart.yml deleted file mode 100644 index 4117f7297..000000000 --- a/playbooks/common/openshift-cluster/restart.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -- include: evaluate_groups.yml -# TODO: verify this is an HA environment -# TODO: fork for pacemaker vs haproxy (based on?) - -- name: Validate configuration for rolling restart - hosts: oo_masters_to_config - tasks: - - set_fact: - openshift_rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" - - fail: - msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'" - when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"] - -# Creating a temp file on localhost, we then check each system that will -# be rebooted to see if that file exists, if so we know we're running -# ansible on a machine that needs a reboot, and we need to error out. 
-- name: Create temp file on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - local_action: command mktemp - register: mktemp - changed_when: False - -- name: Check if temp file exists on any masters - hosts: oo_masters_to_config - tasks: - - stat: path="{{ hostvars.localhost.mktemp.stdout }}" - register: exists - -- name: Cleanup temp file on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent - -- name: Fail if restarting the system where ansible is running - hosts: oo_masters_to_config - any_errors_fatal: true - tasks: - - fail: msg="Cannot run playbook on a host that will be restarted." - when: exists.stat.exists - -- name: Restart Masters - hosts: oo_masters_to_config - serial: 1 - roles: - - openshift_facts - tasks: - - name: Restart master system - # https://github.com/ansible/ansible/issues/10616 - shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" - async: 1 - poll: 0 - ignore_errors: true - become: yes - when: openshift_rolling_restart_mode == 'system' - - name: Restart master services - service: - name: "{{ openshift.common.service_type }}-master-api" - state: restarted - # NOTE: no need to check openshift_master_ha here, we know it must be, - # thus the api service is the one we restart. - when: openshift_rolling_restart_mode == 'services' - - - name: Wait for master API to come back online - become: no - local_action: - module: wait_for - host="{{ inventory_hostname }}" - state=started - delay=10 - port=8443 # TODO: should this be made a master host variable? diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml new file mode 100644 index 000000000..7603f0d61 --- /dev/null +++ b/playbooks/common/openshift-master/restart.yml @@ -0,0 +1,128 @@ +--- +- include: ../openshift-cluster/evaluate_groups.yml + +- name: Validate configuration for rolling restart + hosts: oo_masters_to_config + roles: + - openshift_facts + tasks: + - fail: + msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'" + when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"] + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" + - role: master + local_facts: + cluster_method: "{{ openshift_master_cluster_method | default(None) }}" + +# Creating a temp file on localhost, we then check each system that will +# be rebooted to see if that file exists, if so we know we're running +# ansible on a machine that needs a reboot, and we need to error out. 
+- name: Create temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - local_action: command mktemp + register: mktemp + changed_when: false + +- name: Check if temp file exists on any masters + hosts: oo_masters_to_config + tasks: + - stat: path="{{ hostvars.localhost.mktemp.stdout }}" + register: exists + changed_when: false + +- name: Cleanup temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent + changed_when: false + +- name: Warn if restarting the system where ansible is running + hosts: oo_masters_to_config + tasks: + - pause: + prompt: > + Warning: Running playbook from a host that will be restarted! + Press CTRL+C and A to abort playbook execution. You may + continue by pressing ENTER but the playbook will stop + executing once this system restarts and services must be + manually verified. + when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system' + - set_fact: + current_host: "{{ exists.stat.exists }}" + when: openshift.common.rolling_restart_mode == 'system' + +- name: Determine which masters are currently active + hosts: oo_masters_to_config + tasks: + - name: Check master service status + command: > + systemctl is-active {{ openshift.common.service_type }}-master + register: active_check_output + when: openshift.master.cluster_method == 'pacemaker' + failed_when: active_check_output.stdout not in ['active', 'inactive'] + - set_fact: + is_active: "{{ active_check_output.stdout == 'active' }}" + when: openshift.master.cluster_method == 'pacemaker' + +- name: Evaluate master groups + hosts: localhost + become: no + tasks: + - name: Evaluate oo_active_masters + add_host: + name: "{{ item }}" + groups: oo_active_masters + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_masters_to_config | default([]) }}" + when: (hostvars[item]['is_active'] | default(false)) | bool + - name: Evaluate oo_current_masters + add_host: + name: "{{ item }}" + groups: oo_current_masters + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_masters_to_config | default([]) }}" + when: (hostvars[item]['current_host'] | default(false)) | bool + +- name: Restart masters + hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters + vars: + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + serial: 1 + tasks: + - include: restart_hosts.yml + when: openshift.common.rolling_restart_mode == 'system' + - include: restart_services.yml + when: openshift.common.rolling_restart_mode == 'services' + +- name: Restart active masters + hosts: oo_active_masters + serial: 1 + tasks: + - include: restart_hosts_pacemaker.yml + when: openshift.common.rolling_restart_mode == 'system' + - include: restart_services_pacemaker.yml + when: openshift.common.rolling_restart_mode == 'services' + +- name: Restart current masters + hosts: oo_current_masters + serial: 1 + tasks: + - include: restart_hosts.yml + when: openshift.common.rolling_restart_mode == 'system' + - include: restart_services.yml + when: openshift.common.rolling_restart_mode == 'services' diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml new file mode 100644 index 000000000..598e1ad63 --- /dev/null +++ 
b/playbooks/common/openshift-master/restart_hosts.yml
@@ -0,0 +1,28 @@
+- name: Restart master system
+  # https://github.com/ansible/ansible/issues/10616
+  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+  async: 1
+  poll: 0
+  ignore_errors: true
+  become: yes
+# When cluster_method != pacemaker we can ensure the api_port is
+# available.
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ inventory_hostname }}"
+    state=started
+    delay=10
+    port="{{ openshift.master.api_port }}"
+  when: openshift.master.cluster_method != 'pacemaker'
+# When cluster_method is pacemaker we can only ensure that the host
+# restarted successfully.
+- name: Wait for master to start
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ inventory_hostname }}"
+    state=started
+    delay=10
+  when: openshift.master.cluster_method == 'pacemaker'
diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
new file mode 100644
index 000000000..c9219e8de
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
@@ -0,0 +1,25 @@
+- name: Fail over master resource
+  command: >
+    pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ openshift.master.cluster_hostname }}"
+    state=started
+    delay=10
+    port="{{ openshift.master.api_port }}"
+- name: Restart master system
+  # https://github.com/ansible/ansible/issues/10616
+  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+  async: 1
+  poll: 0
+  ignore_errors: true
+  become: yes
+- name: Wait for master to start
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ inventory_hostname }}"
+    state=started
+    delay=10
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
new file mode 100644
index 000000000..5e539cd65
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -0,0 +1,27 @@
+- name: Restart master
+  service:
+    name: "{{ openshift.common.service_type }}-master"
+    state: restarted
+  when: not openshift_master_ha | bool
+- name: Restart master API
+  service:
+    name: "{{ openshift.common.service_type }}-master-api"
+    state: restarted
+  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ inventory_hostname }}"
+    state=started
+    delay=10
+    port="{{ openshift.master.api_port }}"
+  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Restart master controllers
+  service:
+    name: "{{ openshift.common.service_type }}-master-controllers"
+    state: restarted
+  # Ignore errors since it is possible that type != simple for
+  # pre-3.1.1 installations.
+  ignore_errors: true
+  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml
new file mode 100644
index 000000000..e738f3fb6
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services_pacemaker.yml
@@ -0,0 +1,10 @@
+- name: Restart master services
+  command: pcs resource restart master
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+    host="{{ openshift.master.cluster_hostname }}"
+    state=started
+    delay=10
+    port="{{ openshift.master.api_port }}"
-- 
cgit v1.2.3


From 97be5890e2a34036a22d2d1e2586c83009ae6064 Mon Sep 17 00:00:00 2001
From: Andrew Butcher
Date: Tue, 12 Jan 2016 16:39:06 -0500
Subject: Validate pacemaker cluster members.

---
 playbooks/common/openshift-master/restart.yml       | 13 +++++++++++++
 playbooks/common/openshift-master/restart_hosts.yml | 15 +++++++++++++--
 2 files changed, 26 insertions(+), 2 deletions(-)
(limited to 'playbooks/common')

diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 7603f0d61..fa13a64cb 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -73,6 +73,7 @@
     register: active_check_output
     when: openshift.master.cluster_method == 'pacemaker'
     failed_when: active_check_output.stdout not in ['active', 'inactive']
+    changed_when: false
   - set_fact:
       is_active: "{{ active_check_output.stdout == 'active' }}"
     when: openshift.master.cluster_method == 'pacemaker'
@@ -98,6 +99,18 @@
     with_items: "{{ groups.oo_masters_to_config | default([]) }}"
     when: (hostvars[item]['current_host'] | default(false)) | bool
 
+- name: Validate pacemaker cluster
+  hosts: oo_active_masters
+  tasks:
+  - name: Retrieve pcs status
+    command: pcs status
+    register: pcs_status_output
+    changed_when: false
+  - fail:
+      msg: >
+        Pacemaker cluster validation failed. One or more nodes are not online.
+    when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool
+
 - name: Restart masters
   hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
   vars:
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index 598e1ad63..ff206f5a2 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -16,8 +16,6 @@
     delay=10
     port="{{ openshift.master.api_port }}"
   when: openshift.master.cluster_method != 'pacemaker'
-# When cluster_method is pacemaker we can only ensure that the host
-# restarted successfully.
 - name: Wait for master to start
   become: no
   local_action:
@@ -25,4 +23,17 @@
     host="{{ inventory_hostname }}"
     state=started
     delay=10
+    port=22
   when: openshift.master.cluster_method == 'pacemaker'
+- name: Wait for master to become available
+  command: pcs status
+  register: pcs_status_output
+  until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool
+  retries: 15
+  delay: 2
+  changed_when: false
+  when: openshift.master.cluster_method == 'pacemaker'
+- fail:
+    msg: >
+      Pacemaker cluster validation failed: {{ inventory_hostname }} is not online.
+ when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool -- cgit v1.2.3 From 25e213f79ba5e25bf51d584971064e26d3537b49 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 7 Jan 2016 21:59:46 -0500 Subject: Add a Verify API Server handler that waits for the API server to become available --- playbooks/common/openshift-node/config.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 483a7768c..fbaf64300 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -215,6 +215,15 @@ | oo_collect('openshift.common.hostname') }}" openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}" pre_tasks: - + # Necessary because when you're on a node that's also a master the master will be + # restarted after the node restarts docker and it will take up to 60 seconds for + # systemd to start the master again + - name: Wait for master to become available before proceeding + wait_for: + host: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" + port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" + state: started + timeout: 180 + when: openshift.common.is_containerized | bool roles: - openshift_manage_node -- cgit v1.2.3 From 609469eb8d25baeee30cda96377c9b3fda6e499d Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Tue, 12 Jan 2016 17:18:18 -0500 Subject: Update api verification. --- playbooks/common/openshift-node/config.yml | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index fbaf64300..336cbed5e 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -218,12 +218,20 @@ # Necessary because when you're on a node that's also a master the master will be # restarted after the node restarts docker and it will take up to 60 seconds for # systemd to start the master again - - name: Wait for master to become available before proceeding - wait_for: - host: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" - state: started - timeout: 180 + - name: Wait for master API to become available before proceeding + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. + command: > + curl -k --head --silent {{ openshift.master.api_url }} + register: api_available_output + until: api_available_output.stdout.find("200 OK") != -1 + retries: 120 + delay: 1 + changed_when: false when: openshift.common.is_containerized | bool + - fail: + msg: > + Unable to contact master API at {{ openshift.master.api_url }} + when: openshift.common.is_containerized | bool and api_available_output.stdout.find("200 OK") == -1 roles: - openshift_manage_node -- cgit v1.2.3 From 2e3e0ebe0d98f5374fbfb3a95145a9665d57fe69 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Wed, 13 Jan 2016 10:16:43 -0500 Subject: Add wait in between api and controllers start for native ha. 
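
With any_errors_fatal on the serialized play, a master whose
configuration fails now stops the run rather than letting later masters
proceed. The wait between the api and controllers services itself lands
outside this playbooks/common diff; the curl-based verification
introduced in the previous commit shows the pattern it relies on. A
sketch for reference (assumes openshift_facts has populated
openshift.master.api_url):

    - name: Wait for master API to become available
      # Poll until the endpoint answers 200 OK. curl is used because the
      # uri module requires python-httplib2 and wait_for only proves the
      # port is open, not that the API is healthy.
      command: >
        curl -k --head --silent {{ openshift.master.api_url }}
      register: api_available_output
      until: api_available_output.stdout.find("200 OK") != -1
      retries: 120
      delay: 1
      changed_when: false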
--- playbooks/common/openshift-master/config.yml | 1 + 1 file changed, 1 insertion(+) (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 4ecdf2a0c..0df03f194 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -313,6 +313,7 @@ - name: Configure master instances hosts: oo_masters_to_config + any_errors_fatal: true serial: 1 vars: sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" -- cgit v1.2.3 From 4d25e4d0b375e953e4125f7247283ef8235d67c2 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Mon, 11 Jan 2016 11:41:53 -0500 Subject: 3.1.1 upgrade playbook --- .../upgrades/v3_1_minor/filter_plugins | 1 + .../openshift-cluster/upgrades/v3_1_minor/library | 1 + .../upgrades/v3_1_minor/lookup_plugins | 1 + .../openshift-cluster/upgrades/v3_1_minor/post.yml | 50 ++++++++ .../openshift-cluster/upgrades/v3_1_minor/pre.yml | 87 +++++++++++++ .../openshift-cluster/upgrades/v3_1_minor/roles | 1 + .../upgrades/v3_1_minor/upgrade.yml | 137 +++++++++++++++++++++ 7 files changed, 278 insertions(+) create mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins create mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_minor/library create mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins create mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml create mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml create mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles create mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins new file mode 120000 index 000000000..27ddaa18b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library new file mode 120000 index 000000000..53bed9684 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library @@ -0,0 +1 @@ +../library \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins new file mode 120000 index 000000000..cf407f69b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins @@ -0,0 +1 @@ +../../../../../lookup_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml new file mode 100644 index 000000000..d8336fcae --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml @@ -0,0 +1,50 @@ +--- +############################################################################### +# Post upgrade - Upgrade default router, default registry and examples +############################################################################### +- name: Upgrade default router and default registry + hosts: oo_first_master + vars: + openshift_deployment_type: "{{ deployment_type }}" + registry_image: "{{ 
openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}" + oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + roles: + # Create the new templates shipped in 3.1.z, existing templates are left + # unmodified. This prevents the subsequent role definition for + # openshift_examples from failing when trying to replace templates that do + # not already exist. We could have potentially done a replace --force to + # create and update in one step. + - openshift_examples + # Update the existing templates + - role: openshift_examples + openshift_examples_import_command: replace + pre_tasks: + - name: Check for default router + command: > + {{ oc_cmd }} get -n default dc/router + register: _default_router + failed_when: false + changed_when: false + + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + + - name: Update router image to current version + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' + --api-version=v1 + + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + --api-version=v1 + diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml new file mode 100644 index 000000000..91780de09 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml @@ -0,0 +1,87 @@ +--- +############################################################################### +# Evaluate host groups and gather facts +############################################################################### +- name: Load openshift_facts + hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_facts + +############################################################################### +# Pre-upgrade checks +############################################################################### +- name: Verify upgrade can proceed + hosts: oo_first_master + vars: + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}" + gather_facts: no + tasks: + - fail: + msg: > + This upgrade is only supported for origin, openshift-enterprise, and online + deployment types + when: deployment_type not in ['origin','openshift-enterprise', 'online'] + + - fail: + msg: > + openshift_pkg_version is {{ openshift_pkg_version }} which is not a + valid version for a {{ target_version }} upgrade + when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<') + +- name: Verify upgrade can proceed + hosts: oo_masters_to_config:oo_nodes_to_config + vars: + target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}" + tasks: + - name: Clean package cache + command: "{{ ansible_pkg_mgr }} clean all" + + - set_fact: + g_new_service_name: "{{ 
'origin' if deployment_type =='origin' else 'atomic-openshift' }}" + + - name: Determine available versions + script: ../files/versions.sh {{ g_new_service_name }} openshift + register: g_versions_result + + - set_fact: + g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}" + + - set_fact: + g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}" + + - fail: + msg: This playbook requires Origin 1.1 or later + when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<') + + - fail: + msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later + when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<') + + - fail: + msg: Upgrade packages not found + when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<')) + + - set_fact: + pre_upgrade_complete: True + + +############################################################################## +# Gate on pre-upgrade checks +############################################################################## +- name: Gate on pre-upgrade checks + hosts: localhost + connection: local + become: no + vars: + pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}" + tasks: + - set_fact: + pre_upgrade_completed: "{{ hostvars + | oo_select_keys(pre_upgrade_hosts) + | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}" + - set_fact: + pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}" + when: pre_upgrade_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles @@ -0,0 +1 @@ +../../../../../roles \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml new file mode 100644 index 000000000..81dbba1e3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml @@ -0,0 +1,137 @@ +--- +############################################################################### +# The restart playbook should be run after this playbook completes. 
+############################################################################### + +############################################################################### +# Upgrade Masters +############################################################################### +- name: Upgrade master packages and configuration + hosts: oo_masters_to_config + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade master packages + command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}" + + - name: Ensure python-yaml present for config upgrade + action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" + when: not openshift.common.is_atomic | bool + +# Currently 3.1.1 does not have any new configuration settings +# +# - name: Upgrade master configuration +# openshift_upgrade_config: +# from_version: '3.0' +# to_version: '3.1' +# role: master +# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" + +- name: Set master update status to complete + hosts: oo_masters_to_config + tasks: + - set_fact: + master_update_complete: True + +############################################################################## +# Gate on master update complete +############################################################################## +- name: Gate on master update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + master_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" + - set_fact: + master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" + when: master_update_failed | length > 0 + +############################################################################### +# Upgrade Nodes +############################################################################### +- name: Upgrade nodes + hosts: oo_nodes_to_config + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + roles: + - openshift_facts + tasks: + - name: Upgrade node packages + command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}" + + - name: Restart node service + service: name="{{ openshift.common.service_type }}-node" state=restarted + + - set_fact: + node_update_complete: True + +############################################################################## +# Gate on nodes update +############################################################################## +- name: Gate on nodes update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + node_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_nodes_to_config) + | oo_collect('inventory_hostname', {'node_update_complete': true}) }}" + - set_fact: + node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. 
The following nodes did not finish updating: {{ node_update_failed | join(',') }}" + when: node_update_failed | length > 0 + +############################################################################### +# Reconcile Cluster Roles and Cluster Role Bindings +############################################################################### +- name: Reconcile Cluster Roles and Cluster Role Bindings + hosts: oo_masters_to_config + vars: + origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}" + ent_reconcile_bindings: true + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + tasks: + - name: Reconcile Cluster Roles + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --confirm + run_once: true + + - name: Reconcile Cluster Role Bindings + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + run_once: true + + - set_fact: + reconcile_complete: True + +############################################################################## +# Gate on reconcile +############################################################################## +- name: Gate on reconcile + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + reconcile_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" + - set_fact: + reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" + when: reconcile_failed | length > 0 -- cgit v1.2.3 From e99eda725ba65eeb0d1c13ee1bd3e8737b9d3602 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Thu, 14 Jan 2016 11:29:59 -0500 Subject: Check api prior to starting node. --- playbooks/common/openshift-node/config.yml | 4 ---- 1 file changed, 4 deletions(-) (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 336cbed5e..8d0c4945e 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -229,9 +229,5 @@ delay: 1 changed_when: false when: openshift.common.is_containerized | bool - - fail: - msg: > - Unable to contact master API at {{ openshift.master.api_url }} - when: openshift.common.is_containerized | bool and api_available_output.stdout.find("200 OK") == -1 roles: - openshift_manage_node -- cgit v1.2.3 From 8c2ef6e1192006fea958e277cef5e8d9672476a3 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Fri, 15 Jan 2016 10:44:54 -0500 Subject: Call attention to openshift_master_rolling_restart_mode variable in restart prompt. 
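
For operators who never want the reboot path, the restart mode can also
be pinned ahead of time. A sketch using inventory group variables (the
group_vars file placement is illustrative; the variable name is the one
the validation play at the top of restart.yml checks):

    # group_vars/masters.yml (hypothetical location)
    # Restart only the master services; never reboot the hosts.
    openshift_rolling_restart_mode: services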
--- playbooks/common/openshift-master/restart.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index fa13a64cb..987fae63c 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -57,8 +57,10 @@ Warning: Running playbook from a host that will be restarted! Press CTRL+C and A to abort playbook execution. You may continue by pressing ENTER but the playbook will stop - executing once this system restarts and services must be - manually verified. + executing after this system has been restarted and services + must be verified manually. To only restart services, set + openshift_master_rolling_restart_mode=services in host + inventory and relaunch the playbook. when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system' - set_fact: current_host: "{{ exists.stat.exists }}" -- cgit v1.2.3 From 33ac044082ebdeea44f8c6e58450e580311f319d Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Fri, 15 Jan 2016 13:11:57 -0500 Subject: Configure nodes which are also masters prior to nodes in containerized install. --- playbooks/common/openshift-node/config.yml | 46 ++++++++++++++++++++++++------ 1 file changed, 38 insertions(+), 8 deletions(-) (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 8d0c4945e..1d31657ed 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -154,21 +154,15 @@ validate_checksum: yes with_items: nodes_needing_certs -- name: Configure node instances +- name: Deploy node certificates hosts: oo_nodes_to_config vars: sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - # TODO: Prefix flannel role variables. 
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" - openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - pre_tasks: + tasks: - name: Ensure certificate directory exists file: path: "{{ node_cert_dir }}" state: directory - # TODO: notify restart node # possibly test service started time against certificate/config file # timestamps in node to trigger notify @@ -177,8 +171,44 @@ src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz" dest: "{{ node_cert_dir }}" when: certs_missing + +- name: Evaluate node groups + hosts: localhost + become: no + tasks: + - name: Evaluate oo_containerized_master_nodes + add_host: + name: "{{ item }}" + groups: oo_containerized_master_nodes + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_nodes_to_config | default([]) }}" + when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + +- name: Configure node instances + hosts: oo_containerized_master_nodes + serial: 1 + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" + roles: + - openshift_node + +- name: Configure node instances + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + vars: + openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" roles: - openshift_node + +- name: Additional node config + hosts: oo_nodes_to_config + vars: + # TODO: Prefix flannel role variables. + etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + roles: - role: flannel when: openshift.common.use_flannel | bool - role: nickhammond.logrotate -- cgit v1.2.3 From cc7b1a3f585a86c21eb88d850d963148b98c9f23 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Mon, 18 Jan 2016 10:29:48 -0500 Subject: Fix cluster_method conditional in master restart playbook. 
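
The cluster_method fact is only set when a cluster method was
configured, so on other hosts the bare comparison aborts with an
undefined-variable error under Ansible's default
error_on_undefined_vars=True. Filtering through default(None) lets the
conditional evaluate to false instead. A minimal sketch of the guarded
pattern (the task body is a placeholder):

    - name: Pacemaker-only step
      # default(None) keeps the when: clause from raising when the
      # cluster_method fact was never set on this host.
      command: /bin/true
      when: openshift.master.cluster_method | default(None) == 'pacemaker'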
--- playbooks/common/openshift-master/restart.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 987fae63c..5c8e34817 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -73,12 +73,12 @@ command: > systemctl is-active {{ openshift.common.service_type }}-master register: active_check_output - when: openshift.master.cluster_method == 'pacemaker' + when: openshift.master.cluster_method | default(None) == 'pacemaker' failed_when: active_check_output.stdout not in ['active', 'inactive'] changed_when: false - set_fact: is_active: "{{ active_check_output.stdout == 'active' }}" - when: openshift.master.cluster_method == 'pacemaker' + when: openshift.master.cluster_method | default(None) == 'pacemaker' - name: Evaluate master groups hosts: localhost -- cgit v1.2.3 From 5273611da653bca4a15cfe57e4acc57aef089a37 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Mon, 18 Jan 2016 10:44:37 -0500 Subject: Add 'unknown' to possible output for the is-active check. --- playbooks/common/openshift-master/restart.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks/common') diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 5c8e34817..d9d857b1a 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -74,7 +74,7 @@ systemctl is-active {{ openshift.common.service_type }}-master register: active_check_output when: openshift.master.cluster_method | default(None) == 'pacemaker' - failed_when: active_check_output.stdout not in ['active', 'inactive'] + failed_when: active_check_output.stdout not in ['active', 'inactive', 'unknown'] changed_when: false - set_fact: is_active: "{{ active_check_output.stdout == 'active' }}" -- cgit v1.2.3
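
Taken together with the earlier default(None) fix, the status probe this
series converges on reads as follows; a consolidated sketch assembled
from the commits above:

    - name: Check master service status
      command: >
        systemctl is-active {{ openshift.common.service_type }}-master
      register: active_check_output
      # systemctl is-active exits non-zero for anything but 'active', so
      # failure is gated on unexpected stdout instead; 'unknown' can be
      # reported when the unit is not loaded on a host.
      failed_when: active_check_output.stdout not in ['active', 'inactive', 'unknown']
      changed_when: false
      when: openshift.master.cluster_method | default(None) == 'pacemaker'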