Diffstat (limited to 'playbooks/common')
11 files changed, 185 insertions, 119 deletions
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index ca4f5b8b2..06cda36a5 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -39,9 +39,9 @@
       openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
       openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
       openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
-      openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+      openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
       openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}"
-      openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) =='dynamic' else '' }}"
+      openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_hosted_loggingops_storage_kind | default(none) =='dynamic' else '' }}"
     - role: cockpit-ui
       when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
index f653a111f..c30889d64 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
@@ -36,6 +36,14 @@
       - "openshift-master.crt"
      - "openshift-master.key"
      - "openshift-master.kubeconfig"
+  - name: Remove generated etcd client certificates
+    file:
+      path: "{{ openshift.common.config_base }}/master/{{ item }}"
+      state: absent
+    with_items:
+    - "master.etcd-client.crt"
+    - "master.etcd-client.key"
+    when: groups.oo_etcd_to_config | default([]) | length == 0
   roles:
   - role: openshift_master_certificates
     openshift_master_etcd_hosts: "{{ hostvars
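For reference, the masters.yml change above boils down to one pattern: delete the generated etcd client certificates, but only when no dedicated etcd hosts are in the oo_etcd_to_config group. The following is a minimal standalone sketch of that pattern, not part of the patch; the play wrapper and hosts value are illustrative.

# Illustrative sketch only; mirrors the masters.yml change above.
- name: Clean up generated etcd client certificates (example)
  hosts: oo_masters_to_config
  tasks:
  - name: Remove generated etcd client certificates
    file:
      path: "{{ openshift.common.config_base }}/master/{{ item }}"
      state: absent
    with_items:
    - "master.etcd-client.crt"
    - "master.etcd-client.key"
    # Run only when the oo_etcd_to_config group is empty
    when: groups.oo_etcd_to_config | default([]) | length == 0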
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index 18b93e1d6..6771cc98d 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -2,6 +2,8 @@
 - name: Update registry certificates
   hosts: oo_first_master
   vars:
+  roles:
+  - lib_openshift
   tasks:
   - name: Create temp directory for kubeconfig
     command: mktemp -d /tmp/openshift-ansible-XXXXXX
@@ -46,12 +48,15 @@
   # Replace dc/docker-registry certificate secret contents if set.
   - block:
+    - name: Load lib_openshift modules
+      include_role:
+        name: lib_openshift
+
     - name: Retrieve registry service IP
-      command: >
-        {{ openshift.common.client_binary }} get service docker-registry
-        -o jsonpath='{.spec.clusterIP}'
-        --config={{ mktemp.stdout }}/admin.kubeconfig
-        -n default
+      oc_service:
+        namespace: default
+        name: docker-registry
+        state: list
      register: docker_registry_service_ip
      changed_when: false
@@ -65,18 +70,22 @@
        --signer-cert={{ openshift.common.config_base }}/master/ca.crt
        --signer-key={{ openshift.common.config_base }}/master/ca.key
        --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
-        --hostnames="{{ docker_registry_service_ip.stdout }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
+        --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
        --cert={{ openshift.common.config_base }}/master/registry.crt
        --key={{ openshift.common.config_base }}/master/registry.key
     - name: Update registry certificates secret
-      shell: >
-        {{ openshift.common.client_binary }} secret new registry-certificates
-        {{ openshift.common.config_base }}/master/registry.crt
-        {{ openshift.common.config_base }}/master/registry.key
-        --config={{ mktemp.stdout }}/admin.kubeconfig
-        -n default
-        -o json | oc replace -f -
+      oc_secret:
+        kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+        name: registry-certificates
+        namespace: default
+        state: present
+        files:
+        - name: registry.crt
+          path: "{{ openshift.common.config_base }}/master/registry.crt"
+        - name: registry.key
+          path: "{{ openshift.common.config_base }}/master/registry.key"
+      run_once: true
      when: l_docker_registry_dc.rc == 0 and 'registry-certificates' in docker_registry_secrets and 'REGISTRY_HTTP_TLS_CERTIFICATE' in docker_registry_env_vars and 'REGISTRY_HTTP_TLS_KEY' in docker_registry_env_vars

     - name: Redeploy docker registry
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index a9e9f0915..707fb6424 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -7,6 +7,8 @@
       command: mktemp -d /tmp/openshift-ansible-XXXXXX
       register: mktemp
       changed_when: false
+  roles:
+  - lib_openshift

     - name: Copy admin client config(s)
       command: >
@@ -45,10 +47,12 @@
   - block:
     - name: Delete existing router certificate secret
-      command: >
-        {{ openshift.common.client_binary }} delete secret/router-certs
-        --config={{ mktemp.stdout }}/admin.kubeconfig
-        -n default
+      oc_secret:
+        kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+        name: router-certs
+        namespace: default
+        state: absent
+      run_once: true

     - name: Remove router service annotations
       command: >
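Both certificate plays above swap raw `oc secret` / `oc delete secret` invocations for the lib_openshift oc_secret module. Below is a condensed sketch of the two states used, with only the parameters that appear in the hunks; the kubeconfig and certificate paths shown are examples, not values from the patch.

# Condensed sketch of the oc_secret usage introduced above; paths are examples.
- hosts: oo_first_master
  roles:
  - lib_openshift
  tasks:
  - name: Delete an existing secret
    oc_secret:
      kubeconfig: /etc/origin/master/admin.kubeconfig
      namespace: default
      name: router-certs
      state: absent
    run_once: true

  - name: Recreate a secret from certificate files
    oc_secret:
      kubeconfig: /etc/origin/master/admin.kubeconfig
      namespace: default
      name: registry-certificates
      state: present
      files:
      - name: registry.crt
        path: /etc/origin/master/registry.crt
      - name: registry.key
        path: /etc/origin/master/registry.key
    run_once: true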
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 45aabf3e4..7ef79afa9 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -29,12 +29,18 @@
   - name: Check available disk space for etcd backup
     shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
     register: avail_disk
+    # AUDIT:changed_when: `false` because we are only inspecting
+    # state, not manipulating anything
+    changed_when: false

   # TODO: replace shell module with command and update later checks
   - name: Check current embedded etcd disk usage
     shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
     register: etcd_disk_usage
     when: embedded_etcd | bool
+    # AUDIT:changed_when: `false` because we are only inspecting
+    # state, not manipulating anything
+    changed_when: false

   - name: Abort if insufficient disk space for etcd backup
     fail:
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 690858c53..a9b5b94e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -9,21 +9,36 @@
     register: etcd_rpm_version
     failed_when: false
     when: not openshift.common.is_containerized | bool
+    # AUDIT:changed_when: `false` because we are only inspecting
+    # state, not manipulating anything
+    changed_when: false
+
  - name: Record containerized etcd version
    command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
    register: etcd_container_version
    failed_when: false
    when: openshift.common.is_containerized | bool
+    # AUDIT:changed_when: `false` because we are only inspecting
+    # state, not manipulating anything
+    changed_when: false
+
  - name: Record containerized etcd version
    command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
    register: etcd_container_version
    failed_when: false
    when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool
+    # AUDIT:changed_when: `false` because we are only inspecting
+    # state, not manipulating anything
+    changed_when: false
+
  - name: Record containerized etcd version
    command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
    register: etcd_container_version
    failed_when: false
    when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool
+    # AUDIT:changed_when: `false` because we are only inspecting
+    # state, not manipulating anything
+    changed_when: false

 # I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
 # through hosts, then loop through tasks only when appropriate
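The AUDIT comments added above all follow one rule: a shell or command task that only inspects state gets an explicit `changed_when: false` so it never reports a change. A generic sketch of the pattern follows; the df command and the path it checks are illustrative, not from the patch.

# Sketch of the changed_when audit pattern used above; command and path are examples.
- name: Check available disk space (read-only)
  shell: df --output=avail -k /var/lib/etcd | tail -n 1
  register: avail_disk
  # AUDIT:changed_when: `false` because we are only inspecting
  # state, not manipulating anything
  changed_when: false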
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 37c89374c..046535680 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -1,20 +1,17 @@
 ---
 - name: Filter list of nodes to be upgraded if necessary
   hosts: oo_first_master
+
+  roles:
+  - lib_openshift
+
   tasks:
   - name: Retrieve list of openshift nodes matching upgrade label
-    command: >
-      {{ openshift.common.client_binary }}
-      get nodes
-      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      --selector={{ openshift_upgrade_nodes_label }}
-      -o jsonpath='{.items[*].metadata.name}'
-    register: matching_nodes
-    changed_when: false
-    when: openshift_upgrade_nodes_label is defined
-
-  - set_fact:
-      nodes_to_upgrade: "{{ matching_nodes.stdout.split(' ') }}"
+    oc_obj:
+      state: list
+      kind: node
+      selector: "{{ openshift_upgrade_nodes_label }}"
+    register: nodes_to_upgrade
     when: openshift_upgrade_nodes_label is defined

   # We got a list of nodes with the label, now we need to match these with inventory hosts
@@ -26,7 +23,9 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: " {{ groups['oo_nodes_to_config'] }}"
-    when: openshift_upgrade_nodes_label is defined and hostvars[item].openshift.common.hostname in nodes_to_upgrade
+    when:
+      - openshift_upgrade_nodes_label is defined
+      - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
     changed_when: false

 # Build up the oo_nodes_to_upgrade group, use the list filtered by label if
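The node-filter play above now gathers nodes with oc_obj instead of parsing `oc get` output, and the matching names are pulled out of the registered result via `results.results[0]['items']`. A condensed sketch of that lookup follows; the debug task is illustrative and not part of the patch.

# Condensed sketch of the oc_obj node lookup shown above.
- name: Retrieve list of openshift nodes matching upgrade label
  oc_obj:
    state: list
    kind: node
    selector: "{{ openshift_upgrade_nodes_label }}"
  register: nodes_to_upgrade
  when: openshift_upgrade_nodes_label is defined

- name: Show the matching node names (illustrative)
  debug:
    msg: "{{ nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
  when: openshift_upgrade_nodes_label is defined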
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 4135f7e94..f0191e380 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -9,77 +9,100 @@
     registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
     router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
     oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  roles:
-  - openshift_manageiq
-  # Create the new templates shipped in 3.2, existing templates are left
-  # unmodified. This prevents the subsequent role definition for
-  # openshift_examples from failing when trying to replace templates that do
-  # not already exist. We could have potentially done a replace --force to
-  # create and update in one step.
-  - openshift_examples
-  - openshift_hosted_templates
-  # Update the existing templates
-  - role: openshift_examples
-    registry_url: "{{ openshift.master.registry_url }}"
-    openshift_examples_import_command: replace
-  - role: openshift_hosted_templates
-    registry_url: "{{ openshift.master.registry_url }}"
-    openshift_hosted_templates_import_command: replace
-  pre_tasks:
+  pre_tasks:
+  - name: Load lib_openshift modules
+    include_role:
+      name: lib_openshift
   # TODO: remove temp_skip_router_registry_upgrade variable. This is a short term hack
   # to allow ops to use this control plane upgrade, without triggering router/registry
   # upgrade which has not yet been synced with their process.
   - name: Collect all routers
-    command: >
-      {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+    oc_obj:
+      state: list
+      kind: pods
+      all_namespaces: True
+      selector: 'router'
     register: all_routers
-    failed_when: false
-    changed_when: false
     when: temp_skip_router_registry_upgrade is not defined

-  - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
-    when: all_routers.rc == 0 and temp_skip_router_registry_upgrade is not defined
+  - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+    when:
+    - all_routers.results.returncode == 0
+    - temp_skip_router_registry_upgrade is not defined

   - set_fact: haproxy_routers=[]
-    when: all_routers.rc != 0 and temp_skip_router_registry_upgrade is not defined
+    when:
+    - all_routers.results.returncode != 0
+    - temp_skip_router_registry_upgrade is not defined

   - name: Update router image to current version
-    when: all_routers.rc == 0 and temp_skip_router_registry_upgrade is not defined
+    when:
+    - all_routers.results.returncode == 0
+    - temp_skip_router_registry_upgrade is not defined
     command: >
       {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
       '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
       --api-version=v1
     with_items: "{{ haproxy_routers }}"
+    # AUDIT:changed_when_note: `false` not being set here. What we
+    # need to do is check the current router image version and see if
+    # this task needs to be ran.

   - name: Check for default registry
-    command: >
-      {{ oc_cmd }} get -n default dc/docker-registry
+    oc_obj:
+      state: list
+      kind: dc
+      name: docker-registry
     register: _default_registry
-    failed_when: false
-    changed_when: false
     when: temp_skip_router_registry_upgrade is not defined

   - name: Update registry image to current version
-    when: _default_registry.rc == 0 and temp_skip_router_registry_upgrade is not defined
+    when:
+    - _default_registry.results.results[0] != {}
+    - temp_skip_router_registry_upgrade is not defined
     command: >
       {{ oc_cmd }} patch dc/docker-registry -n default -p
       '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
       --api-version=v1
+    # AUDIT:changed_when_note: `false` not being set here. What we
+    # need to do is check the current registry image version and see
+    # if this task needs to be ran.
+
+  roles:
+  - openshift_manageiq
+  # Create the new templates shipped in 3.2, existing templates are left
+  # unmodified. This prevents the subsequent role definition for
+  # openshift_examples from failing when trying to replace templates that do
+  # not already exist. We could have potentially done a replace --force to
+  # create and update in one step.
+  - openshift_examples
+  - openshift_hosted_templates
+  # Update the existing templates
+  - role: openshift_examples
+    registry_url: "{{ openshift.master.registry_url }}"
+    openshift_examples_import_command: replace
+  - role: openshift_hosted_templates
+    registry_url: "{{ openshift.master.registry_url }}"
+    openshift_hosted_templates_import_command: replace

 # Check for warnings to be printed at the end of the upgrade:
 - name: Check for warnings
   hosts: oo_masters_to_config
   tasks:
   # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
-  - command: >
-      grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
+  - name: grep pluginOrderOverride
+    command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
     register: grep_plugin_order_override
     when: openshift.common.version_gte_3_3_or_1_3 | bool
-    failed_when: false
     changed_when: false
+
   - name: Warn if pluginOrderOverride is in use in master-config.yaml
-    debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
-    when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0
+    debug:
+      msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
+    when:
+    - not grep_plugin_order_override | skipped
+    - grep_plugin_order_override.rc == 0

 - include: ../reset_excluder.yml
   tags:
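The registry check above shows the existence test used with oc_obj: list the deploymentconfig and only act when the first result is a non-empty object. A trimmed sketch of that guard follows; the actual patch command is elided and replaced with a debug placeholder, which is not part of the patch.

# Trimmed sketch of the oc_obj existence check used above.
- name: Check for default registry
  oc_obj:
    state: list
    kind: dc
    name: docker-registry
  register: _default_registry

- name: Act only if the deploymentconfig exists (placeholder)
  debug:
    msg: "dc/docker-registry is present, safe to patch"
  when: _default_registry.results.results[0] != {}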
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index db2c27919..a4aefcdac 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -238,29 +238,22 @@
   any_errors_fatal: true

   pre_tasks:
+  - name: Load lib_openshift modules
+    include_role:
+      name: lib_openshift
+
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
-  - name: Determine if node is currently scheduleable
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
-    register: node_output
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    changed_when: false
-
-  - set_fact:
-      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
   - name: Mark node unschedulable
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    # NOTE: There is a transient "object has been modified" error here, allow a couple
-    # retries for a more reliable upgrade.
-    register: node_unsched
-    until: node_unsched.rc == 0
-    retries: 3
-    delay: 1
+    retries: 10
+    delay: 5
+    register: node_unschedulable
+    until: node_unschedulable|succeeded

   - name: Drain Node for Kubelet upgrade
     command: >
@@ -268,17 +261,19 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"

   roles:
+  - lib_openshift
   - openshift_facts
   - docker
   - openshift_node_upgrade

   post_tasks:
   - name: Set node schedulability
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: True
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: was_schedulable | bool
-    register: node_sched
-    until: node_sched.rc == 0
-    retries: 3
-    delay: 1
+    retries: 10
+    delay: 5
+    register: node_schedulable
+    until: node_schedulable|succeeded
+    when: node_unschedulable|changed
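Both the control-plane and node upgrade plays now cordon and uncordon with oadm_manage_node, retrying to ride out the transient "object has been modified" error mentioned in the removed NOTE, and they only restore schedulability when the cordon task actually reported a change. A trimmed sketch of that pair of tasks, lifted from the hunks above:

# Trimmed sketch of the cordon/uncordon pattern used in both upgrade plays above.
- name: Mark node unschedulable
  oadm_manage_node:
    node: "{{ openshift.node.nodename | lower }}"
    schedulable: False
  delegate_to: "{{ groups.oo_first_master.0 }}"
  retries: 10
  delay: 5
  register: node_unschedulable
  until: node_unschedulable|succeeded

- name: Set node schedulability
  oadm_manage_node:
    node: "{{ openshift.node.nodename | lower }}"
    schedulable: True
  delegate_to: "{{ groups.oo_first_master.0 }}"
  retries: 10
  delay: 5
  register: node_schedulable
  until: node_schedulable|succeeded
  # Only uncordon if the cordon step changed anything
  when: node_unschedulable|changed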
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e45b635f7..e3a98fd9b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -7,29 +7,22 @@
   any_errors_fatal: true

   pre_tasks:
+  - name: Load lib_openshift modules
+    include_role:
+      name: lib_openshift
+
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
-  - name: Determine if node is currently scheduleable
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
-    register: node_output
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    changed_when: false
-
-  - set_fact:
-      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-
   - name: Mark node unschedulable
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    # NOTE: There is a transient "object has been modified" error here, allow a couple
-    # retries for a more reliable upgrade.
-    register: node_unsched
-    until: node_unsched.rc == 0
-    retries: 3
-    delay: 1
+    retries: 10
+    delay: 5
+    register: node_unschedulable
+    until: node_unschedulable|succeeded

   - name: Drain Node for Kubelet upgrade
     command: >
@@ -37,20 +30,22 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"

   roles:
+  - lib_openshift
   - openshift_facts
   - docker
   - openshift_node_upgrade

   post_tasks:
   - name: Set node schedulability
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+    oadm_manage_node:
+      node: "{{ openshift.node.nodename | lower }}"
+      schedulable: True
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: was_schedulable | bool
-    register: node_sched
-    until: node_sched.rc == 0
-    retries: 3
-    delay: 1
+    retries: 10
+    delay: 5
+    register: node_schedulable
+    until: node_schedulable|succeeded
+    when: node_unschedulable|changed

 - include: ../reset_excluder.yml
   tags:
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index a9750e40f..67ba0aa2e 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -7,14 +7,26 @@
   ignore_errors: true
   become: yes

+# WARNING: This process is riddled with weird behavior.
+
+# Workaround for https://github.com/ansible/ansible/issues/21269
+- set_fact:
+    wait_for_host: "{{ ansible_host }}"
+
+# Ansible's blog documents this *without* the port, which appears to now
+# just wait until the timeout value and then proceed without checking anything.
+# port is now required.
+#
+# However neither ansible_ssh_port or ansible_port are reliably defined, likely
+# only if overridden. Assume a default of 22.
 - name: Wait for master to restart
   local_action:
     module: wait_for
-    host="{{ ansible_host }}"
+    host="{{ wait_for_host }}"
     state=started
     delay=10
     timeout=600
-    port="{{ ansible_ssh_port }}"
+    port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
   become: no

 # Now that ssh is back up we can wait for API on the remote system,
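The restart workaround above pins the wait_for target in a separate fact (to dodge ansible/ansible#21269) and supplies an explicit port, falling back to 22 when neither ansible_port nor ansible_ssh_port is defined. A minimal sketch of that fallback, taken almost verbatim from the hunk:

# Minimal sketch of the reboot-wait fallback shown above.
- set_fact:
    wait_for_host: "{{ ansible_host }}"

- name: Wait for master to restart
  local_action:
    module: wait_for
    host="{{ wait_for_host }}"
    state=started
    delay=10
    timeout=600
    port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
  become: no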