12 files changed, 364 insertions(+), 19 deletions(-)
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 3541d5471..93fdd5ae4 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -234,7 +234,7 @@ class FilterModule(object):
             arrange them as a string 'key=value key=value'
         """
         if not isinstance(data, dict):
-            raise errors.AnsibleFilterError("|failed expects first param is a dict")
+            raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data))))
 
         return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
 
@@ -286,7 +286,7 @@ class FilterModule(object):
             }
         """
         if not isinstance(data, dict):
-            raise errors.AnsibleFilterError("|failed expects first param is a dict")
+            raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data))))
         if host_type not in ['master', 'node', 'etcd']:
             raise errors.AnsibleFilterError("|failed expects etcd, master or node"
                                             " as the host type")
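The two hunks above only change exception text, but the extra context matters: the old message never said which filter rejected the input or what it was given. A minimal standalone sketch of the patched oo_combine_dict behavior follows; a plain TypeError stands in for the errors.AnsibleFilterError the real filter raises.

```python
# Standalone sketch of the oo_combine_dict filter patched above; TypeError
# stands in for errors.AnsibleFilterError from Ansible.
def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
    """Flatten a dict into a 'key=value key=value' string."""
    if not isinstance(data, dict):
        # The new message names the filter and echoes the offending value,
        # which makes template failures far easier to trace.
        raise TypeError(
            "|failed expects first param is a dict [oo_combine_dict]. "
            "Got %s. Type: %s" % (str(data), str(type(data))))
    return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])

print(oo_combine_dict({'region': 'infra', 'zone': 'default'}))
# -> region=infra zone=default (dict ordering may vary on older Pythons)
```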
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 7a3829283..9a5d84751 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -92,10 +92,9 @@
   vars:
     master_config_hook: "v3_3/master_config_upgrade.yml"
 
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+
 - include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
   vars:
     node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../../../openshift-master/restart.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index d6af71827..c9338a960 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -98,3 +98,4 @@
     master_config_hook: "v3_3/master_config_upgrade.yml"
 
 - include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
new file mode 100644
index 000000000..85b807dc6
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
@@ -0,0 +1,18 @@
+# v3.4 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node.
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
new file mode 100644
index 000000000..4f8a80ee8
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -0,0 +1,96 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+  tags:
+  - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+      openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  tasks:
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
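All three new v3_4 playbooks build openshift_no_proxy_internal_hostnames through the repo's oo_select_keys and oo_collect filters. Roughly, that filter chain reduces to the following simplified sketch; the hostvars content and group membership here are invented for illustration, and the real filters also validate their inputs and handle missing attributes.

```python
# Simplified sketch of the oo_select_keys | oo_collect | join(',') chain.
# hostvars and the selected group contents are invented for illustration.
hostvars = {
    'master1': {'openshift': {'common': {'hostname': 'master1.example.com'}}},
    'node1': {'openshift': {'common': {'hostname': 'node1.example.com'}}},
    'lb1': {'openshift': {'common': {'hostname': 'lb1.example.com'}}},
}
selected = ['master1', 'node1']  # union of node, master and etcd groups

# oo_select_keys: keep only the hostvars entries for the selected hosts.
facts = [hostvars[h] for h in selected if h in hostvars]

# oo_collect('openshift.common.hostname'): pull one nested attribute per host.
hostnames = [f['openshift']['common']['hostname'] for f in facts]

print(','.join(hostnames))  # master1.example.com,node1.example.com
```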
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
new file mode 100644
index 000000000..8cde2ac88
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -0,0 +1,98 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+  tags:
+  - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+      openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- name: Update repos on control plane hosts
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
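Each playbook pins openshift_upgrade_target and openshift_upgrade_min up front, and the pre-upgrade includes gate on them. Conceptually the gate reduces to a version comparison like the sketch below; this is a conceptual illustration only, the actual checks live in common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml and are more involved.

```python
# Conceptual sketch of the openshift_upgrade_min gate; not the actual
# task logic from verify_upgrade_targets.yml.
def version_tuple(version):
    return tuple(int(part) for part in version.split('.'))

def may_upgrade(current, upgrade_min):
    """A host may only jump to the target if it already runs the minimum."""
    return version_tuple(current) >= version_tuple(upgrade_min)

# Enterprise hosts must be on 3.3 before targeting 3.4 (origin: 1.3 -> 1.4).
assert may_upgrade('3.3', upgrade_min='3.3')
assert not may_upgrade('3.2', upgrade_min='3.3')
```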
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
new file mode 100644
index 000000000..f385d4f22
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -0,0 +1,100 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+  tags:
+  - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+      openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos on nodes
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_repos
+  tags:
+  - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- name: Verify masters are already upgraded
+  hosts: oo_masters_to_config
+  tags:
+  - pre_upgrade
+  tasks:
+  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+    when: openshift.common.version != openshift_version
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_nodes_to_upgrade
+  tasks:
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
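The "Verify masters are already upgraded" play is the one piece unique to the standalone node upgrade: it refuses to proceed until every master reports the target version. Reduced to plain Python, with invented host versions, the guard looks like this:

```python
# The "Verify masters are already upgraded" guard from upgrade_nodes.yml,
# reduced to plain Python; host names and versions here are invented.
openshift_version = '3.4.0'
masters = {'master1.example.com': '3.4.0', 'master2.example.com': '3.3.1'}

for host, current in masters.items():
    if current != openshift_version:
        raise SystemExit(
            "Master %s running %s must be upgraded to %s before node "
            "upgrade can be run." % (host, current, openshift_version))
```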
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
index 32a3636aa..439df5ffd 100644
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
@@ -1,5 +1,3 @@
-- include_vars: ../../../../roles/openshift_node/vars/main.yml
-
 - name: Update systemd units
   include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index b3f4d7d1a..927d9b4ca 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -99,6 +99,8 @@
   - include: rpm_upgrade.yml component=master
     when: not openshift.common.is_containerized | bool
 
+# Create service signer cert when missing. Service signer certificate
+# is added to master config in the master config hook for v3_3.
 - name: Determine if service signer cert must be created
   hosts: oo_first_master
   tasks:
@@ -108,8 +110,6 @@
     register: service_signer_cert_stat
     changed_when: false
 
-# Create service signer cert when missing. Service signer certificate
-# is added to master config in the master config hook for v3_3.
 - include: create_service_signer_cert.yml
 
 - name: Upgrade master config and systemd units
@@ -128,13 +128,6 @@
   - name: Update systemd units
     include: ../../../../roles/openshift_master/tasks/systemd_units.yml
 
-#  - name: Upgrade master configuration
-#    openshift_upgrade_config:
-#      from_version: '3.1'
-#      to_version: '3.2'
-#      role: master
-#      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
   - name: Check for ca-bundle.crt
     stat:
       path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
@@ -184,6 +177,10 @@
         msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
       when: master_update_failed | length > 0
 
+# We are now ready to restart master services (or entire system
+# depending on openshift_rolling_restart_mode):
+- include: ../../openshift-master/restart.yml
+
 ###############################################################################
 # Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
 ###############################################################################
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index f281b1303..61ce55b7f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1035,12 +1035,23 @@ def get_current_config(facts):
     return current_config
 
 def build_kubelet_args(facts):
-    """ Build node kubelet_args """
-    cloud_cfg_path = os.path.join(facts['common']['config_base'],
-                                  'cloudprovider')
+    """Build node kubelet_args
+
+In the node-config.yaml file, kubeletArgument sub-keys have their
+values provided as a list. Hence the gratuitous use of ['foo'] below.
+    """
+    cloud_cfg_path = os.path.join(
+        facts['common']['config_base'],
+        'cloudprovider')
+
+    # We only have to do this stuff on hosts that are nodes
     if 'node' in facts:
+        # Any changes to the kubeletArguments parameter are stored
+        # here first.
         kubelet_args = {}
+
         if 'cloudprovider' in facts:
+            # EVERY cloud is special <3
             if 'kind' in facts['cloudprovider']:
                 if facts['cloudprovider']['kind'] == 'aws':
                     kubelet_args['cloud-provider'] = ['aws']
@@ -1050,6 +1061,28 @@ def build_kubelet_args(facts):
                     kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                 if facts['cloudprovider']['kind'] == 'gce':
                     kubelet_args['cloud-provider'] = ['gce']
+
+        # Automatically add node-labels to the kubeletArguments
+        # parameter. See BZ1359848 for additional details.
+        #
+        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
+        if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
+            # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
+            # into ['foo=bar', 'a=b']
+            #
+            # On the openshift_node_labels inventory variable we loop
+            # over each key-value tuple (from .items()) and join the
+            # key to the value with an '=' character, this produces a
+            # list.
+            #
+            # map() seems to be returning an itertools.imap object
+            # instead of a list. We cast it to a list ourselves.
+            labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
+            if labels_str != '':
+                kubelet_args['node-labels'] = labels_str
+
+        # If we've added items to the kubelet_args dict then we need
+        # to merge the new items back into the main facts object.
         if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
     return facts
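The node-labels hunk above is the heart of the BZ1359848 fix. Isolated from the facts plumbing, the conversion looks like the sketch below; note that labels_str is a list, so the patch's `labels_str != ''` guard is always true, and a plain truthiness check would be the conventional way to skip an empty dict.

```python
# The dict-to-list conversion used above for the kubeletArguments
# node-labels entry; os_node_labels stands in for facts['node']['labels'].
os_node_labels = {'region': 'primary', 'zone': 'default'}

# map() can return a lazy iterator rather than a list (a map object on
# Python 3), hence the explicit list() cast in the patch.
labels_str = list(map(lambda x: '='.join(x), os_node_labels.items()))
print(labels_str)  # e.g. ['region=primary', 'zone=default']

# labels_str is a list, so comparing it to '' is always unequal; a
# truthiness check correctly skips the argument for an empty dict.
if labels_str:
    kubelet_args = {'node-labels': labels_str}
    print(kubelet_args)
```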
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 1de63ecc3..8b3145785 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -9,6 +9,10 @@
       role: "{{ item.role }}"
       local_facts: "{{ item.local_facts }}"
     with_items:
+      # Reset node labels to an empty dictionary.
+      - role: node
+        local_facts:
+          labels: {}
       - role: node
         local_facts:
           annotations: "{{ openshift_node_annotations | default(none) }}"
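Seeding labels with an empty dict guarantees the fact always exists as a dict, which is exactly what build_kubelet_args' isinstance check expects, and it clears labels left over from a previous run before any entry that applies openshift_node_labels (outside this hunk) overrides it. A simplified sketch of that ordering; the merge logic here is invented for illustration, the real implementation is merge_facts() in openshift_facts.py:

```python
# Simplified sketch of why the role seeds labels with {} first; the real
# merge behavior is merge_facts() in openshift_facts.py.
def apply_local_facts(facts, items):
    node = facts.setdefault('node', {})
    for item in items:
        for key, value in item['local_facts'].items():
            if value is not None:  # unset inventory vars default to none
                node[key] = value
    return facts

facts = {'node': {'labels': {'stale': 'label-from-last-run'}}}
items = [
    {'role': 'node', 'local_facts': {'labels': {}}},  # reset first
    {'role': 'node', 'local_facts': {'labels': {'region': 'infra'}}},  # user value
]
print(apply_local_facts(facts, items)['node']['labels'])  # {'region': 'infra'}
```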