Diffstat (limited to 'playbooks')
50 files changed, 544 insertions, 85 deletions
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index d60b68885..8d64b0521 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -33,5 +33,6 @@ openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" + openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" openshift_use_dnsmasq: false diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 86eff4ca4..4db0720d0 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -7,5 +7,4 @@ vars: openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_type: "{{ deployment_type }}" openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml index eebfcd20d..f8eebe898 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/byo/openshift-cluster/openshift-logging.yml @@ -32,4 +32,3 @@ vars: openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml index 4ee6afe2a..304559f6e 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -28,7 +28,7 @@ tasks: - name: Mark node unschedulable - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: False delegate_to: "{{ groups.oo_first_master.0 }}" @@ -51,7 +51,7 @@ when: l_docker_upgrade is defined and l_docker_upgrade | bool - name: Set node schedulability - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: True delegate_to: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml index d5fd7c424..5feb33be4 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -30,7 +30,6 @@ g_new_master_hosts: [] g_new_node_hosts: [] openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_deployment_type: "{{ deployment_type }}" - include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml index e4db65b02..86f5a36ca 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -50,6 +50,8 @@ tags: - pre_upgrade +# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. +# So it is necessary to run the play after running disable_excluder.yml.
- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml tags: - pre_upgrade diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md new file mode 100644 index 000000000..930cc753c --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md @@ -0,0 +1,18 @@ +# v3.6 Major and Minor Upgrade Playbook + +## Overview +This playbook currently performs the +following steps. + + * Upgrade and restart master services + * Unschedule node. + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml new file mode 100644 index 000000000..900bbc8d8 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -0,0 +1,111 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: l_oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade + +- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml + tags: + - pre_upgrade + +# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. +# So it is necessary to run the play after running disable_excluder.yml. +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+ +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml + +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml new file mode 100644 index 000000000..5bd0f7ac5 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -0,0 +1,115 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + tags: + - pre_upgrade + +# Configure the upgrade target for the common upgrade tasks: +- hosts: l_oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml new file mode 100644 index 000000000..96d89dbdd --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -0,0 +1,104 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + tags: + - pre_upgrade + +# Configure the upgrade target for the common upgrade tasks: +- hosts: l_oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml index 6713f07e3..19403116d 100644 --- a/playbooks/byo/openshift-etcd/restart.yml +++ b/playbooks/byo/openshift-etcd/restart.yml @@ -4,5 +4,3 @@ - always - include: ../../common/openshift-etcd/restart.yml - vars: - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 2d20f69f4..21e4cff1b 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -4,5 +4,3 @@ - always - include: ../../common/openshift-master/restart.yml - vars: - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index 7075bb59e..a5705e990 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -27,4 +27,3 @@ vars: openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml index 3985a83bb..6861625b9 100644 --- a/playbooks/byo/openshift-node/restart.yml +++ b/playbooks/byo/openshift-node/restart.yml @@ -4,5 +4,3 @@ - always - include: ../../common/openshift-node/restart.yml - vars: - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index 2b10b6c76..88d236b53 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -27,6 +27,5 @@ vars: openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_type: "{{ deployment_type }}" openshift_master_etcd_hosts: "{{ groups.etcd | default([]) }}" openshift_master_etcd_port: 2379 diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 65c0b1c01..8c6d77024 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -5,8 +5,6 @@ - name: Subscribe hosts, update repos and update OS packages hosts: l_oo_all_hosts - vars: - openshift_deployment_type: "{{ deployment_type }}" roles: - role: rhel_subscribe when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and diff --git a/playbooks/certificate_expiry/easy-mode-upload.yaml b/playbooks/certificate_expiry/easy-mode-upload.yaml new file mode 100644 index 000000000..378d1f154 --- /dev/null +++ b/playbooks/certificate_expiry/easy-mode-upload.yaml @@ -0,0 +1,40 @@ +# This example generates HTML and JSON reports and +# +# Copies of the generated HTML and JSON reports are uploaded to the masters, +# which is particularly useful when this playbook is run from a container. 
+# +# All certificates (healthy or not) are included in the results +# +# Optional environment variables to alter the behaviour of the playbook: +# CERT_EXPIRY_WARN_DAYS: Length of the warning window in days (45) +# COPY_TO_PATH: path to copy reports to in the masters (/etc/origin/certificate_expiration_report) +--- +- name: Generate certificate expiration reports + hosts: nodes:masters:etcd + gather_facts: no + vars: + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_show_all: yes + openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}" + roles: + - role: openshift_certificate_expiry + +- name: Upload reports to master + hosts: masters + gather_facts: no + vars: + destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + tasks: + - name: Ensure that the target directory exists + file: + path: "{{ destination_path }}" + state: directory + - name: Copy the reports + copy: + dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}" + src: "/tmp/{{ item }}" + with_items: + - "cert-expiry-report.html" + - "cert-expiry-report.json" diff --git a/playbooks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/certificate_expiry/html_and_json_timestamp.yaml new file mode 100644 index 000000000..2189455b7 --- /dev/null +++ b/playbooks/certificate_expiry/html_and_json_timestamp.yaml @@ -0,0 +1,16 @@ +--- +# Generate timestamped HTML and JSON reports in /var/lib/certcheck + +- name: Check cert expiries + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_show_all: yes + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html" + openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json" + roles: + - role: openshift_certificate_expiry diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 82f711f40..1b967b7f1 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -27,9 +27,6 @@ when: openshift_docker_selinux_enabled is not defined - include: disable_excluder.yml - vars: - # the excluders needs to be disabled no matter what status says - with_status_check: false tags: - always @@ -60,3 +57,7 @@ - include: openshift_hosted.yml tags: - hosted + +- include: reset_excluder.yml + tags: + - always diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml index b2e025cb8..f664c51c9 100644 --- a/playbooks/common/openshift-cluster/disable_excluder.yml +++ b/playbooks/common/openshift-cluster/disable_excluder.yml @@ -1,6 +1,6 @@ --- -- name: Record excluder state and disable - hosts: l_oo_all_hosts +- name: Disable excluders + hosts: oo_masters_to_config:oo_nodes_to_config gather_facts: no tasks: diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml index ca5177852..5425f448f 100644 --- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml +++
b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -56,8 +56,6 @@ - role: node local_facts: dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" - vars: - openshift_deployment_type: "{{ deployment_type }}" roles: - openshift_node_dnsmasq post_tasks: diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index 18f99728c..9cebecd68 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -15,5 +15,3 @@ hostname: "{{ openshift_hostname | default(None) }}" - set_fact: openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - - set_fact: - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 7f37c606f..07b38920f 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -18,13 +18,6 @@ msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout" -- include: disable_excluder.yml - vars: - # the excluders needs to be disabled no matter what status says - with_status_check: false - tags: - - always - - name: Determine openshift_version to configure on first master hosts: oo_first_master roles: diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 06cda36a5..5db71b857 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -53,6 +53,8 @@ pre_tasks: - set_fact: openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" + - set_fact: + openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" tasks: - block: @@ -60,3 +62,9 @@ name: openshift_logging tasks_from: update_master_config when: openshift_hosted_logging_deploy | default(false) | bool + + - block: + - include_role: + name: openshift_metrics + tasks_from: update_master_config + when: openshift_hosted_metrics_deploy | default(false) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml index 2af699209..cbb4a2434 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml @@ -31,7 +31,7 @@ - name: Generate new etcd CA hosts: oo_first_etcd roles: - - role: etcd_ca + - role: openshift_etcd_ca etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml index 6771cc98d..8c8062585 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml @@ -48,10 +48,6 @@ # Replace dc/docker-registry certificate secret contents if set. - block: - - name: Load lib_openshift modules - include_role: - name: lib_openshift - - name: Retrieve registry service IP oc_service: namespace: default @@ -70,9 +66,12 @@ --signer-cert={{ openshift.common.config_base }}/master/ca.crt --signer-key={{ openshift.common.config_base }}/master/ca.key --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt - --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" + --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" --cert={{ openshift.common.config_base }}/master/registry.crt --key={{ openshift.common.config_base }}/master/registry.key + {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %} + --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }} + {% endif %} - name: Update registry certificates secret oc_secret: diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml index 7c544ee32..eaa8ce39c 100644 --- a/playbooks/common/openshift-cluster/reset_excluder.yml +++ b/playbooks/common/openshift-cluster/reset_excluder.yml @@ -1,6 +1,6 @@ --- - name: Re-enable excluder if it was previously enabled - hosts: l_oo_all_hosts + hosts: oo_masters_to_config:oo_nodes_to_config gather_facts: no tasks: - include_role: diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 078991b12..74cc1d527 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -22,8 +22,6 @@ - always tasks: - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - set_fact: - openshift_deployment_type: "{{ deployment_type }}" - include: evaluate_groups.yml tags: diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml 
b/playbooks/common/openshift-cluster/update_repos_and_packages.yml index b83e4d821..be956fca5 100644 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml @@ -3,8 +3,6 @@ - name: Subscribe hosts, update repos and update OS packages hosts: oo_hosts_to_update - vars: - openshift_deployment_type: "{{ deployment_type }}" roles: # Explicitly calling openshift_facts because it appears that when # rhel_subscribe is skipped that the openshift_facts dependency for diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml index 2a85dc92e..d1e431c5e 100644 --- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml +++ b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml @@ -1,6 +1,6 @@ --- - name: Record excluder state and disable - hosts: l_oo_all_hosts + hosts: oo_masters_to_config:oo_nodes_to_config gather_facts: no tasks: - include: pre/validate_excluder.yml diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index a3b8c489e..01d151eb9 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -29,7 +29,6 @@ g_new_master_hosts: [] g_new_node_hosts: [] openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_deployment_type: "{{ deployment_type }}" - name: Set oo_options hosts: oo_all_hosts @@ -71,8 +70,8 @@ tasks: - name: Check if iptables is running command: systemctl status iptables - ignore_errors: true changed_when: false + failed_when: false register: service_iptables_status - name: Set fact os_firewall_use_firewalld FALSE for iptables diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 6f096f705..c00795a8d 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -5,7 +5,6 @@ - name: Upgrade default router and default registry hosts: oo_first_master vars: - openshift_deployment_type: "{{ deployment_type }}" registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}" router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}" oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml index 5078638b7..6de1ed061 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml @@ -3,20 +3,27 @@ # - repoquery_cmd # - excluder # - openshift_upgrade_target -- name: Get available excluder version - command: > - {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}" - register: excluder_version - failed_when: false - changed_when: false +- block: + - name: Get available excluder version + command: > + {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}" + register: excluder_version + failed_when: false + changed_when: false -- name: Docker excluder version detected - debug: - msg: 
"{{ excluder }}: {{ excluder_version.stdout }}" + - name: Docker excluder version detected + debug: + msg: "{{ excluder }}: {{ excluder_version.stdout }}" -- name: Check the available {{ excluder }} version is at most of the upgrade target version - fail: - msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version {{ openshift_upgrade_target }}" - when: + - name: Printing upgrade target version + debug: + msg: "{{ openshift_upgrade_target }}" + + - name: Check the available {{ excluder }} version is at most of the upgrade target version + fail: + msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version" + when: - "{{ excluder_version.stdout != '' }}" - - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target, '>', strict=True) }}" + - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}" + when: + - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index df2b664d4..03ac02e9f 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -1,7 +1,26 @@ --- # We verified latest rpm available is suitable, so just yum update. -- name: Upgrade packages - package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present" + +# Master package upgrade ends up depending on node and sdn packages, we need to be explicit +# with all versions to avoid yum from accidentally jumping to something newer than intended: +- name: Upgrade master packages + package: name={{ item }} state=present + when: component == "master" + with_items: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + +- name: Upgrade node packages + package: name={{ item }} state=present + when: component == "node" + with_items: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - name: Ensure python-yaml present for config upgrade package: name=PyYAML state=present diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index babb7191d..c6e799261 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -64,6 +64,7 @@ static: yes roles: - openshift_facts + - lib_utils post_tasks: # Run the pre-upgrade hook if defined: @@ -113,6 +114,13 @@ state: link when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + - name: Update oreg value + yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: 'imageConfig.format' + value: "{{ oreg_url }}" + when: oreg_url is defined + 
# Run the upgrade hook prior to restarting services/system if defined: - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" when: openshift_master_upgrade_hook is defined @@ -262,7 +270,7 @@ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until # we merge upgrade functionality into the base roles and a normal config.yml playbook run. - name: Mark node unschedulable - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: False delegate_to: "{{ groups.oo_first_master.0 }}" @@ -284,7 +292,7 @@ post_tasks: - name: Set node schedulability - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: True delegate_to: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 4e1838c71..e9f894942 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -15,7 +15,7 @@ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until # we merge upgrade functionality into the base roles and a normal config.yml playbook run. - name: Mark node unschedulable - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: False delegate_to: "{{ groups.oo_first_master.0 }}" @@ -37,7 +37,7 @@ post_tasks: - name: Set node schedulability - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: True delegate_to: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml index 9c126033c..ae63c9ca9 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml @@ -35,7 +35,7 @@ kind: petsets register: l_do_petsets_exist - - name: FAIL ON Resource migration 'PetSets' unsupported + - name: Fail on unsupported resource migration 'PetSets' fail: msg: > PetSet objects were detected in your cluster. These are an @@ -59,9 +59,9 @@ migrating to StatefulSets, run this command as a user with cluster-admin privileges: - $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascale=false + $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false when: # Search did not fail, valid resource type found - - l_do_petsets_exist.results.returncode == "0" + - l_do_petsets_exist.results.returncode == 0 # Items do exist in the search results - l_do_petsets_exist.results.results.0['items'] | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins new file mode 120000 index 000000000..7de3c1dd7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/roles b/playbooks/common/openshift-cluster/upgrades/v3_6/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml new file mode 100644 index 000000000..48c69eccd --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml @@ -0,0 +1,18 @@ +--- +############################################################################### +# Post upgrade - Upgrade job storage +############################################################################### +- name: Upgrade job storage + hosts: oo_first_master + roles: + - { role: openshift_cli } + vars: + # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe + # restart. + skip_docker_role: True + tasks: + - name: Upgrade job storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=jobs --confirm + run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml new file mode 100644 index 000000000..ac5704f69 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml @@ -0,0 +1,10 @@ +--- +############################################################################### +# Pre upgrade checks for known data problems, if this playbook fails you should +# contact support. If you're not supported contact users@lists.openshift.com +############################################################################### +- name: Verify 3.6 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: [] diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 7a334e771..60cf56108 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -48,12 +48,6 @@ - set_fact: openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}" when: openshift_hosted_metrics_resolution is not defined - - set_fact: - openshift_hosted_metrics_deployer_prefix: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_prefix') | default('openshift') }}" - when: openshift_hosted_metrics_deployer_prefix is not defined - - set_fact: - openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}" - when: openshift_hosted_metrics_deployer_version is not defined roles: - openshift_facts post_tasks: @@ -127,6 +121,10 @@ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" + - role: nuage_master + when: openshift.common.use_nuage | bool + - role: calico_master + when: openshift.common.use_calico | bool post_tasks: - name: Create group for deployment type diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 18e5c665f..92f16dc47 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -60,8 +60,16 @@ - openshift_facts - openshift_docker +- include: ../openshift-cluster/disable_excluder.yml + tags: + - always + - include: ../openshift-master/config.yml - include: ../openshift-loadbalancer/config.yml - include: ../openshift-node/config.yml + +- include: 
../openshift-cluster/reset_excluder.yml + tags: + - always diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 6c5a299c1..792ffb4e2 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -82,6 +82,8 @@ etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" when: openshift.common.use_flannel | bool + - role: calico + when: openshift.common.use_calico | bool - role: nuage_node when: openshift.common.use_nuage | bool - role: contiv diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml index bb3b1e780..c31aca62b 100644 --- a/playbooks/common/openshift-node/scaleup.yml +++ b/playbooks/common/openshift-node/scaleup.yml @@ -27,4 +27,12 @@ - openshift_facts - openshift_docker +- include: ../openshift-cluster/disable_excluder.yml + tags: + - always + - include: ../openshift-node/config.yml + +- include: ../openshift-cluster/reset_excluder.yml + tags: + - always diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 8e46c5919..2625d4d05 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -32,4 +32,5 @@ openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" + openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index 44b0f5a3c..f782d6dab 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -33,5 +33,6 @@ openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" + openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" openshift_use_dnsmasq: false diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml index 1366c83ca..f9ddb9469 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -29,4 +29,5 @@ openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" + openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 20ce47c07..82329eac1 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -340,16 +340,6 @@ resources: port_range_max: 10250 remote_mode: remote_group_id - direction: ingress - protocol: tcp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 
10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress protocol: udp port_range_min: 4789 port_range_max: 4789 |
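Example usage of the new v3_6 upgrade playbooks added above (a sketch following the usage line in the new README; ~/ansible-inventory is a placeholder inventory path). The upgrade can be run in a single pass, or split into a control-plane phase followed by a node phase, since upgrade_nodes.yml requires the control plane to have been upgraded first:

# Full control plane + nodes upgrade in one pass
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml

# Two-phase upgrade: control plane first, then nodes
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml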