Diffstat (limited to 'playbooks')
23 files changed, 270 insertions, 47 deletions
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index bdc98d1e0..cf811ca84 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -201,9 +201,7 @@ There are more enhancements that are arriving for provisioning. These will incl ## Uninstall / Deprovisioning -At this time, only deprovisioning of the output of the prerequisites step is provided. You can/must manually remove things like ELBs and scale groups before attempting to undo the work by the preprovisiong step. - -To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning. +To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You will have needed to remove any of the other objects (ie ELBs, instances, etc) before attempting. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning. ``` ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml @@ -211,4 +209,10 @@ ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars fi This should result in removal of the security groups and VPC that were created. +Cleaning up the S3 bucket contents can be accomplished with: + +``` +ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml +``` + NOTE: If you want to also remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account** so we don't remove these by default) then you should add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file. diff --git a/playbooks/aws/openshift-cluster/uninstall_elb.yml b/playbooks/aws/openshift-cluster/uninstall_elb.yml new file mode 100644 index 000000000..c1b724f0c --- /dev/null +++ b/playbooks/aws/openshift-cluster/uninstall_elb.yml @@ -0,0 +1,9 @@ +--- +- name: Delete elb + hosts: localhost + connection: local + tasks: + - name: deprovision elb + include_role: + name: openshift_aws + tasks_from: uninstall_elb.yml diff --git a/playbooks/aws/openshift-cluster/uninstall_s3.yml b/playbooks/aws/openshift-cluster/uninstall_s3.yml new file mode 100644 index 000000000..448b47aee --- /dev/null +++ b/playbooks/aws/openshift-cluster/uninstall_s3.yml @@ -0,0 +1,10 @@ +--- +- name: Empty/delete s3 bucket + hosts: localhost + connection: local + tasks: + - name: empty/delete s3 bucket + include_role: + name: openshift_aws + tasks_from: uninstall_s3.yml + when: openshift_aws_create_s3 | default(true) | bool diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md new file mode 100644 index 000000000..7ede3a28c --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md @@ -0,0 +1,20 @@ +# v3.10 Major and Minor Upgrade Playbook + +## Overview +This playbook currently performs the following steps. 
+ + * Upgrade and restart master services + * Unschedule node + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml new file mode 100644 index 000000000..977b4f381 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml @@ -0,0 +1,5 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml new file mode 100644 index 000000000..8b76bf4ff --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml @@ -0,0 +1,16 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml + +- import_playbook: ../../../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml new file mode 100644 index 000000000..b4353edc2 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml @@ -0,0 +1,7 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml index 23a3fcbb5..23a3fcbb5 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index f44ab3580..9c927c0a1 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -119,6 +119,9 @@ - shell: > echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)' register: cert_output + changed_when: false + failed_when: + - cert_output.rc not in [0, 1] # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs - name: set a fact to include the registry certs playbook if needed diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml index edc541ef9..44af37b2d 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml @@ -5,8 +5,6 @@ # Pre-upgrade - import_playbook: ../initialize_nodes_to_upgrade.yml -- import_playbook: verify_cluster.yml - - name: Update repos on upgrade hosts hosts: "{{ l_upgrade_repo_hosts }}" roles: @@ -53,6 +51,8 @@ # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml +- import_playbook: verify_cluster.yml + # If we're only upgrading nodes, we need to ensure masters are already upgraded - name: Verify masters are already upgraded hosts: oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml index 5ee8a9d78..463a05688 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml @@ -17,6 +17,7 @@ valid version for a {{ openshift_upgrade_target }} upgrade when: - openshift_pkg_version is defined + - openshift_pkg_version != "" - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<') - fail: @@ -25,6 +26,7 @@ valid version for a {{ openshift_upgrade_target }} upgrade when: - openshift_image_tag is defined + - openshift_image_tag != "" - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<') - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 1dcd9a207..3c0b72832 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,29 +3,6 @@ # Upgrade Masters ############################################################################### -# Prior to 3.6, openshift-ansible created etcd serving certificates -# without a SubjectAlternativeName entry for the system 
hostname. The -# SAN list in Go 1.8 is now (correctly) authoritative and since -# openshift-ansible configures masters to talk to etcd hostnames -# rather than IP addresses, we must correct etcd certificates. -# -# This play examines the etcd serving certificate SANs on each etcd -# host and records whether or not the system hostname is missing. -- name: Examine etcd serving certificate SAN - hosts: oo_etcd_to_config - tasks: - - slurp: - src: /etc/etcd/server.crt - register: etcd_serving_cert - - set_fact: - __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}" - -# Redeploy etcd certificates when hostnames were missing from etcd -# serving certificate SANs. -- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml - when: - - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) - - name: Backup and upgrade etcd import_playbook: ../../../openshift-etcd/private/upgrade_main.yml @@ -56,7 +33,6 @@ register: l_pb_upgrade_control_plane_pre_upgrade_storage when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool failed_when: - - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool @@ -122,7 +98,6 @@ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - openshift_version is version_compare('3.7','<') failed_when: - - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 - openshift_upgrade_post_storage_migration_fatal | default(false) | bool run_once: true @@ -258,7 +233,6 @@ register: l_pb_upgrade_control_plane_post_upgrade_storage when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool failed_when: - - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 - openshift_upgrade_post_storage_migration_fatal | default(false) | bool @@ -337,9 +311,13 @@ post_tasks: - import_role: name: openshift_node + tasks_from: upgrade_pre.yml + - import_role: + name: openshift_node tasks_from: upgrade.yml - import_role: name: openshift_manage_node tasks_from: config.yml vars: openshift_master_host: "{{ groups.oo_first_master.0 }}" + openshift_manage_node_is_master: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml @@ -0,0 +1 @@ +--- diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/roles b/playbooks/common/openshift-cluster/upgrades/v3_10/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml new file mode 100644 index 000000000..ec1da6d39 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml @@ -0,0 +1,7 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- import_playbook: upgrade_control_plane.yml + +- import_playbook: upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml new file mode 100644 index 000000000..64ee03562 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml @@ -0,0 +1,58 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- import_playbook: ../init.yml + vars: + l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + +- name: Configure the upgrade target for the common upgrade tasks 3.10 + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tasks: + - meta: clear_facts + - set_fact: + openshift_upgrade_target: '3.10' + openshift_upgrade_min: '3.9' + openshift_release: '3.10' + +- import_playbook: ../pre/config.yml + # These vars a meant to exclude oo_nodes from plays that would otherwise include + # them by default. + vars: + l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" + l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_masters_to_config" + openshift_protect_installed_version: False + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - set_fact: + pre_upgrade_complete: True + +- import_playbook: ../upgrade_control_plane.yml + vars: + openshift_release: '3.10' + +- import_playbook: ../post_control_plane.yml + +- hosts: oo_masters + tasks: + - import_role: + name: openshift_web_console + tasks_from: remove_old_asset_config diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml new file mode 100644 index 000000000..eea1b250e --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml @@ -0,0 +1,35 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- import_playbook: ../init.yml + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tasks: + - set_fact: + openshift_upgrade_target: '3.10' + openshift_upgrade_min: '3.9' + openshift_release: '3.10' + +- import_playbook: ../pre/config.yml + vars: + l_upgrade_repo_hosts: "oo_nodes_to_config" + l_upgrade_no_proxy_hosts: "oo_all_hosts" + l_upgrade_health_check_hosts: "oo_nodes_to_config" + l_upgrade_verify_targets_hosts: "oo_nodes_to_config" + l_upgrade_docker_target_hosts: "oo_nodes_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" + l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - set_fact: + pre_upgrade_complete: True + +# Pre-upgrade completed + +- import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml new file mode 100644 index 000000000..d8540abfb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.8 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - debug: msg="noop" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml index b3bc121d7..8792295c6 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -26,6 +26,7 @@ openshift_upgrade_min: '3.7' openshift_release: '3.8' _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}" + openshift_pkg_version: '' _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}" l_double_upgrade_cp: True when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<') @@ -61,10 +62,8 @@ # Pre-upgrade completed -- import_playbook: ../upgrade_control_plane.yml - vars: - openshift_release: '3.8' - openshift_pkg_version: '' +- name: Intermediate 3.8 Upgrade + import_playbook: ../upgrade_control_plane.yml when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<') ## 3.8 upgrade complete we should now be able to upgrade to 3.9 @@ -77,7 +76,7 @@ openshift_upgrade_target: '3.9' openshift_upgrade_min: '3.8' openshift_release: '3.9' - openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}" + openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}" # Set the user's specified image_tag for 3.9 upgrade if it was provided. 
- set_fact: openshift_image_tag: "{{ _requested_image_tag }}" @@ -106,6 +105,7 @@ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" l_upgrade_excluder_hosts: "oo_masters_to_config" openshift_protect_installed_version: False + openshift_version_reinit: True - name: Flag pre-upgrade checks complete for hosts without errors hosts: oo_masters_to_config:oo_etcd_to_config @@ -114,8 +114,6 @@ pre_upgrade_complete: True - import_playbook: ../upgrade_control_plane.yml - vars: - openshift_release: '3.9' # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml index 8997680f9..fea588260 100644 --- a/playbooks/openshift-etcd/private/upgrade_main.yml +++ b/playbooks/openshift-etcd/private/upgrade_main.yml @@ -1,4 +1,37 @@ --- +# Prior to 3.6, openshift-ansible created etcd serving certificates +# without a SubjectAlternativeName entry for the system hostname. The +# SAN list in Go 1.8 is now (correctly) authoritative and since +# openshift-ansible configures masters to talk to etcd hostnames +# rather than IP addresses, we must correct etcd certificates. +# +# This play examines the etcd serving certificate SANs on each etcd +# host and records whether or not the system hostname is missing. +- name: Examine etcd serving certificate SAN + hosts: oo_etcd_to_config + tasks: + - slurp: + src: /etc/etcd/server.crt + register: etcd_serving_cert + - set_fact: + __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}" + +# Redeploy etcd certificates when hostnames were missing from etcd +# serving certificate SANs. +- import_playbook: redeploy-certificates.yml + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + +- import_playbook: restart.yml + vars: + g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}" + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + +- import_playbook: ../../openshift-master/private/restart.yml + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + # For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to # upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. 
While this is a tedius # task for RHEL and CENTOS it's simply not possible in Fedora unless you've diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml index b817221b8..d88209593 100644 --- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml @@ -88,8 +88,7 @@ - name: Redeploy docker registry command: > - {{ openshift_client_binary }} deploy dc/docker-registry - --latest + {{ openshift_client_binary }} rollout latest dc/docker-registry --config={{ mktemp.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml index 0df748f47..952a5f4ee 100644 --- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml @@ -129,8 +129,7 @@ - name: Redeploy router command: > - {{ openshift_client_binary }} deploy dc/router - --latest + {{ openshift_client_binary }} rollout latest dc/router --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml index 07aa8bfde..f2a57f9f8 100644 --- a/playbooks/openshift-logging/private/config.yml +++ b/playbooks/openshift-logging/private/config.yml @@ -11,6 +11,38 @@ status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" +- name: Update vm.max_map_count for ES 5.x + hosts: all + gather_facts: false + tasks: + - when: + - openshift_logging_es5_techpreview | default(false) | bool + - openshift_deployment_type in ['origin'] + block: + - name: Checking vm max_map_count value + command: + cat /proc/sys/vm/max_map_count + register: _vm_max_map_count + + - stat: + path: /etc/sysctl.d/99-elasticsearch.conf + register: _99_es_conf + + - name: Check for current value of vm.max_map_count in 99-elasticsearch.conf + command: > + sed /etc/sysctl.d/99-elasticsearch.conf -e 's/vm.max_map_count=\(.*\)/\1/' + register: _curr_vm_max_map_count + when: _99_es_conf.stat.exists + + - name: Updating vm.max_map_count value + sysctl: + name: vm.max_map_count + value: 262144 + sysctl_file: "/etc/sysctl.d/99-elasticsearch.conf" + reload: yes + when: + - _vm_max_map_count.stdout | default(0) | int < 262144 | int or _curr_vm_max_map_count.stdout | default(0) | int < 262144 + - name: OpenShift Aggregated Logging hosts: oo_first_master roles: @@ -20,11 +52,10 @@ - name: Update Master configs hosts: oo_masters:!oo_first_master tasks: - - block: - - import_role: - name: openshift_logging - tasks_from: update_master_config - when: not openshift.common.version_gte_3_9 + - include_role: + name: openshift_logging + tasks_from: update_master_config + when: not openshift.common.version_gte_3_9 - name: Logging Install Checkpoint End hosts: all |
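Note: the `vm.max_map_count` handling added to the logging play above can also be exercised on its own. Below is a minimal standalone sketch, not part of this commit, using the same sysctl value and drop-in file the play writes; the host group name is illustrative.

```
---
# Minimal sketch (not part of this commit): raise vm.max_map_count for
# Elasticsearch 5.x the same way the new logging play does.
# "logging_es_hosts" is an illustrative inventory group name.
- name: Ensure vm.max_map_count is high enough for ES 5.x
  hosts: logging_es_hosts
  become: true
  tasks:
    - name: Persist and apply vm.max_map_count
      sysctl:
        name: vm.max_map_count
        value: 262144
        sysctl_file: /etc/sysctl.d/99-elasticsearch.conf
        reload: yes
```

Run against the Elasticsearch hosts with a normal `ansible-playbook -i <inventory> <playbook>` invocation; the real installer only applies this when `openshift_logging_es5_techpreview` is enabled on origin deployments, as shown in the hunk above.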