From b65403b8ac3cd0eea46179d4758f6f0be5929728 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus
Date: Wed, 4 Nov 2015 22:55:25 -0500
Subject: Further upgrade improvements

- Restart masters post reconcile
- generate missing master certs and sync to masters
---
 playbooks/adhoc/upgrades/upgrade.yml | 214 +++++++++++++++++++++++++++--------
 1 file changed, 168 insertions(+), 46 deletions(-)

diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index c113c7ab2..7433dc9c0 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -1,22 +1,17 @@
 ---
 - name: Verify upgrade can proceed
-  hosts: masters
+  hosts: masters[0]
+  gather_facts: no
   tasks:
-    # Checking the global deployment type rather than host facts, this is about
-    # what the user is requesting.
+  # Checking the global deployment type rather than host facts, this is about
+  # what the user is requesting.
   - fail: msg="Deployment type enterprise not supported for upgrade"
     when: deployment_type == "enterprise"
 
-- name: Update deployment type
-  hosts: OSEv3
-  roles:
-  - openshift_facts
-  post_tasks: # technically tasks are run after roles, but post_tasks is a bit more explicit.
-  - openshift_facts:
-      role: common
-      local_facts:
-        deployment_type: "{{ deployment_type }}"
-
+# TODO: etcd is only guaranteed to be run on the master if embedded etcd is
+# used, we should have a task to create a new group consisting of the single
+# master running embedded etcd or an external etcd cluster specified by the
+# etcd group in the inventory (which could also be the masters)
 - name: Backup etcd
   hosts: masters
   vars:
@@ -27,9 +22,11 @@
   tasks:
   - stat: path=/var/lib/openshift
     register: var_lib_openshift
+
   - name: Create origin symlink if necessary
     file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
     when: var_lib_openshift.stat.exists == True
+
   - name: Check available disk space for etcd backup
     # We assume to be using the data dir for all backups.
     shell: >
@@ -43,14 +40,51 @@
     when: embedded_etcd | bool
 
   - name: Abort if insufficient disk space for etcd backup
-    fail: msg="{{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, {{ avail_disk.stdout }} Kb available."
+    fail:
+      msg: >
+        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+        {{ avail_disk.stdout }} Kb available.
     when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
   - name: Install etcd (for etcdctl)
-    yum: pkg=etcd state=latest
+    yum:
+      pkg: etcd
+      state: latest
+
   - name: Generate etcd backup
-    command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+    command: >
+      etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }}
+      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
   - name: Display location of etcd backup
-    debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+    debug:
+      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+- name: Update deployment type
+  hosts: OSEv3
+  roles:
+  - openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: common
+      local_facts:
+        deployment_type: "{{ deployment_type }}"
+
+- name: Upgrade base package on masters
+  hosts: masters
+  roles:
+  - openshift_facts
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+  - name: Upgrade base package
+    yum:
+      pkg: "{{ openshift.common.service_type }}{{ openshift_version }}"
+      state: latest
 
+# TODO: ideally we would check the new version, without installing it. (some
+# kind of yum repoquery? would need to handle openshift -> atomic-openshift
+# package rename)
 - name: Perform upgrade version checking
   hosts: masters[0]
@@ -73,19 +107,13 @@
     fail: Unable to determine upgrade version for {{ openshift.common.service_type }}
     when: _new_version.stdout == ""
 
-- name: Ensure AOS 3.0.2 or Origin 1.0.6
-  hosts: masters[0]
-  tasks:
-  - fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
-    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
+  - fail:
+      msg: This playbook requires Atomic OpenShift 3.0.2 or later
+    when: deployment_type in ['openshift_enterprise', 'atomic-enterprise'] and g_new_version.stdout | version_compare('3.0','>=') and g_new_version.stdout | version_compare('3.0.2','<')
 
-- name: Verify upgrade can proceed
-  hosts: masters[0]
-  tasks:
-    # Checking the global deployment type rather than host facts, this is about
-    # what the user is requesting.
-    - fail: msg="Deployment type 'enterprise' must be updated to 'openshift-enterprise' for upgrade to proceed"
-      when: deployment_type == "enterprise" and (_new_version.stdout | version_compare('1.0.7', '>=') or _new_version.stdout | version_compare('3.1', '>='))
+  - fail:
+      msg: This playbook requires Origin 1.0.6 or later
+    when: deployment_type == 'origin'
 
 - name: Upgrade masters
   hosts: masters
@@ -93,13 +121,104 @@
     openshift_version: "{{ openshift_pkg_version | default('') }}"
   tasks:
   - name: Upgrade to latest available kernel
-    yum: pkg=kernel state=latest
+    yum:
+      pkg: kernel
+      state: latest
+
+  - name: display just the deployment_type variable for the current host
+    debug:
+      var: hostvars[inventory_hostname].openshift.common.deployment_type
+
   - name: Upgrade master packages
     command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
-  - name: Upgrade master configuration.
-    openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master config_base={{ hostvars[inventory_hostname].openshift.common.config_base }}
-  - name: Restart master services
-    service: name="{{ openshift.common.service_type}}-master" state=restarted
+
+  - name: Ensure python-yaml present for config upgrade
+    yum:
+      pkg: python-yaml
+      state: installed
+
+  - name: Upgrade master configuration
+    openshift_upgrade_config:
+      from_version: '3.0'
+      to_version: '3.1'
+      role: master
+
+  - set_fact:
+      master_certs_missing: True
+      master_cert_subdir: master-{{ openshift.common.hostname }}
+      master_cert_config_dir: "{{ openshift.common.config_base }}/master"
+
+- name: Create temp directory for syncing certs
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Create local temp directory for syncing certs
+    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: g_master_mktemp
+    changed_when: False
+
+- name: Generate missing master certificates
+  hosts: masters[0]
+  vars:
+    master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
+    masters_needing_certs: "{{ hostvars
+                               | oo_select_keys(groups.masters)
+                               | difference([groups.masters.0]) }}"
+    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+  roles:
+  - openshift_master_certificates
+  post_tasks:
+  - name: Remove generated etcd client certs when using external etcd
+    file:
+      path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+      state: absent
+    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+    with_nested:
+    - masters_needing_certs
+    - - master.etcd-client.crt
+      - master.etcd-client.key
+
+  - name: Create a tarball of the master certs
+    command: >
+      tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
+        -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
+    args:
+      creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
+    with_items: masters_needing_certs
+
+  - name: Retrieve the master cert tarball from the master
+    fetch:
+      src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
+      dest: "{{ sync_tmpdir }}/"
+      flat: yes
+      fail_on_missing: yes
+      validate_checksum: yes
+    with_items: masters_needing_certs
+
+
+- name: Sync certs and restart masters post configuration change
+  hosts: masters
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+  tasks:
+  - name: Unarchive the tarball on the master
+    unarchive:
+      src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
+      dest: "{{ master_cert_config_dir }}"
+    when: inventory_hostname != groups.masters.0
+
+  - name: Restart master services
+    service: name="{{ openshift.common.service_type}}-master" state=restarted
+
+
+- name: Delete temporary directory on localhost
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - file: name={{ g_master_mktemp.stdout }} state=absent
+    changed_when: False
+
 
 - name: Upgrade nodes
   hosts: nodes
@@ -113,17 +232,17 @@
   - name: Restart node services
     service: name="{{ openshift.common.service_type }}-node" state=restarted
 
-- name: Update cluster policy
+- name: Update cluster policy and policy bindings
   hosts: masters[0]
+  vars:
+    origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version.stdout | version_compare('1.0.6', '>') }}"
+    ent_reconcile_bindings: "{{ deployment_type in ['openshift-enterprise', 'atomic-enterprise'] and g_new_version.stdout | version_compare('3.0.2','>') }}"
   tasks:
   - name: oadm policy reconcile-cluster-roles --confirm
     command: >
       {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles --confirm
 
-- name: Update cluster policy bindings
-  hosts: masters[0]
-  tasks:
   - name: oadm policy reconcile-cluster-role-bindings --confirm
     command: >
       {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
@@ -132,11 +251,20 @@
       --exclude-groups=system:unauthenticated
       --exclude-users=system:anonymous
       --additive-only=true --confirm
-    when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+
+
+- name: Restart masters post reconcile
+  hosts: masters
+  tasks:
+  - name: Restart master services
+    service: name="{{ openshift.common.service_type}}-master" state=restarted
 
-- name: Upgrade default router
+- name: Upgrade default router and registry
   hosts: masters[0]
   vars:
+  - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
   - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
   - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
   tasks:
@@ -172,12 +300,6 @@
         {{ oc_cmd }} patch dc/router -p
         '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
 
-- name: Upgrade default
-  hosts: masters[0]
-  vars:
-  - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
-  - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  tasks:
   - name: Check for default registry
     command: >
       {{ oc_cmd }} get -n default dc/docker-registry
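
The cert distribution added in this patch follows a reusable pattern: create a scratch directory on the Ansible control host, build a tarball on the first master, fetch it back to the control host, unpack it on the remaining masters, and clean up. A minimal, self-contained sketch of that flow is shown below; the group name masters, the /etc/example paths, and the bundle name are illustrative assumptions, not values taken from this commit.

---
# Sketch only: the same localhost-tempdir sync pattern, with hypothetical paths.
- name: Create a scratch directory on the control host
  hosts: localhost
  gather_facts: no
  tasks:
  - local_action: command mktemp -d /tmp/sync-example-XXXXXX
    register: g_sync_tmpdir
    changed_when: False

- name: Bundle files on the first master and pull them back
  hosts: masters[0]
  tasks:
  - name: Create the tarball to distribute
    command: tar -czf /tmp/bundle.tgz -C /etc/example/generated .
  - name: Fetch the tarball to the control host
    fetch:
      src: /tmp/bundle.tgz
      dest: "{{ hostvars.localhost.g_sync_tmpdir.stdout }}/"
      flat: yes

- name: Push and unpack the bundle on the remaining masters
  hosts: masters
  tasks:
  - unarchive:
      src: "{{ hostvars.localhost.g_sync_tmpdir.stdout }}/bundle.tgz"
      dest: /etc/example
    when: inventory_hostname != groups.masters.0

- name: Clean up the scratch directory
  hosts: localhost
  gather_facts: no
  tasks:
  - file: name={{ g_sync_tmpdir.stdout }} state=absent
    changed_when: False

Routing the tarball through the control host keeps the masters from needing SSH access to one another, which is the same reason the patch stages the generated certificates through a local mktemp directory.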