author    | Brenton Leanhardt <bleanhar@redhat.com> | 2016-03-29 15:00:54 -0400
committer | Brenton Leanhardt <bleanhar@redhat.com> | 2016-03-29 15:00:54 -0400
commit    | 7b42f91f8637f6c3ce49f357bea75a3e6528b1ba
tree      | 9d2e2cc1605034debe7cf917af5e74f5838427f8
parent    | 6d74968b30f3dff2046ab50330c2de55ac103a8b
parent    | e6e5c64508ff876973cc5db768012ec836aea31e
Merge pull request #1666 from brenton/systemd1
Reusing the systemd templates for the upgrade
11 files changed, 209 insertions, 163 deletions
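In short, the upgrade playbooks stop shipping the ensure_system_units_have_version.sh helper and instead reuse the systemd unit task files from the openshift_master and openshift_node roles. A minimal sketch of the pattern, taken from the containerized_upgrade.yml hunk in the diff below (the relative paths resolve from that playbook's directory):

- include_vars: ../../../../../roles/openshift_node/vars/main.yml

- name: Update systemd units
  include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=g_aos_versions.avail_version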
diff --git a/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh b/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
deleted file mode 100644
index 239f43314..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-set -e
-
-SERVICE_TYPE=$1
-DEPLOYMENT_TYPE=$2
-VERSION="v${3}"
-
-add_image_version_to_sysconfig () {
-  unit_name=$2
-  sysconfig_file=/etc/sysconfig/${unit_name}
-
-  if ! grep IMAGE_VERSION ${sysconfig_file}; then
-    sed -i "/CONFIG_FILE/a IMAGE_VERSION=${1}" ${sysconfig_file}
-  else
-    sed -i "s/\(IMAGE_VERSION=\).*/\1${1}/" ${sysconfig_file}
-  fi
-}
-
-add_image_version_to_unit () {
-  deployment_type=$1
-  unit_file=$2
-
-  if ! grep IMAGE_VERSION $unit_file; then
-    image_namespace="openshift/"
-    if [ $deployment_type == "atomic-enterprise" ]; then
-      image_namespace="aep3/"
-    elif [ $deployment_type == "openshift-enterprise" ]; then
-      image_namespace="openshift3/"
-    fi
-
-    sed -i "s|\(${image_namespace}[a-zA-Z0-9]\+\)|\1:\${IMAGE_VERSION}|" $unit_file
-  fi
-}
-
-for unit_file in $(ls /etc/systemd/system/${SERVICE_TYPE}*.service); do
-  unit_name=$(basename -s .service ${unit_file})
-  add_image_version_to_sysconfig $VERSION $unit_name
-  add_image_version_to_unit $DEPLOYMENT_TYPE $unit_file
-done
-
-if [ -e /etc/sysconfig/openvswitch ]; then
-  add_image_version_to_sysconfig $VERSION openvswitch
-else
-  echo IMAGE_VERSION=${VERSION} > /etc/sysconfig/openvswitch
-fi
-if ! grep EnvironmentFile /etc/systemd/system/openvswitch.service > /dev/null; then
-  sed -i "/Service/a EnvironmentFile=/etc/sysconfig/openvswitch" /etc/systemd/system/openvswitch.service
-fi
-add_image_version_to_unit $DEPLOYMENT_TYPE /etc/systemd/system/openvswitch.service
-
-systemctl daemon-reload
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
index 696994688..cc587bfa1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
@@ -1,8 +1,10 @@
-- name: Update system_units
-  script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_new_version }}
+- include_vars: ../../../../../roles/openshift_node/vars/main.yml
+
+- name: Update systemd units
+  include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=g_aos_versions.avail_version
 
 - name: Verifying the correct version was configured
-  command: grep {{ verify_upgrade_version }} {{ item }}
+  shell: grep {{ verify_upgrade_version }} {{ item }}
   with_items:
   - /etc/sysconfig/openvswitch
   - /etc/sysconfig/{{ openshift.common.service_type }}*
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
index 668a80996..d491575f8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -47,32 +47,34 @@
 
 - name: Verify upgrade can proceed
   hosts: oo_masters_to_config
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  roles:
+  - openshift_facts
   tasks:
   - name: Ensure Master is running
     service:
       name: "{{ openshift.common.service_type }}-master"
       state: started
       enabled: yes
-    when: not openshift_master_ha | bool and openshift.common.is_containerized | bool
+    when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
 
   - name: Ensure HA Master is running
     service:
       name: "{{ openshift.common.service_type }}-master-api"
       state: started
       enabled: yes
-    when: openshift_master_ha | bool and openshift.common.is_containerized | bool
+    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
 
   - name: Ensure HA Master is running
     service:
       name: "{{ openshift.common.service_type }}-master-controllers"
       state: started
       enabled: yes
-    when: openshift_master_ha | bool and openshift.common.is_containerized | bool
+    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
 
 - name: Verify upgrade can proceed
   hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
   tasks:
   - name: Ensure Node is running
     service:
@@ -85,6 +87,9 @@
   hosts: oo_masters_to_config:oo_nodes_to_config
   vars:
     target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+  handlers:
+  - include: ../../../../../roles/openshift_master/handlers/main.yml
+  - include: ../../../../../roles/openshift_node/handlers/main.yml
   roles:
   - openshift_cli
   tasks:
@@ -130,9 +135,19 @@
       msg: Verifying the correct version was found
     when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
 
+  - include_vars: ../../../../../roles/openshift_master/vars/main.yml
+    when: inventory_hostname in groups.oo_masters_to_config
+
   - name: Update systemd units
-    script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_aos_versions.curr_version }}
-    when: openshift.common.is_containerized | bool
+    include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=g_aos_versions.curr_version
+    when: inventory_hostname in groups.oo_masters_to_config
+
+  - include_vars: ../../../../../roles/openshift_node/vars/main.yml
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - name: Update systemd units
+    include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=g_aos_versions.curr_version
+    when: inventory_hostname in groups.oo_nodes_to_config
 
 # Note: the version number is hardcoded here in hopes of catching potential
 # bugs in how g_aos_versions.curr_version is set
@@ -143,6 +158,13 @@
     - /etc/sysconfig/{{ openshift.common.service_type }}*
     when: verify_upgrade_version is defined
 
+  - name: Verifying the image version is used in the systemd unit
+    shell: grep IMAGE_VERSION {{ item }}
+    with_items:
+    - /etc/systemd/system/openvswitch.service
+    - /etc/systemd/system/{{ openshift.common.service_type }}*.service
+    when: openshift.common.is_containerized | bool
+
   - fail:
       msg: This playbook requires Origin 1.1 or later
     when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index d84d9f674..481d8e67a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -1,4 +1,33 @@
 ---
+# This is a workaround for authenticated registries
+- name: Download new images
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - name: Pull Images
+    command: >
+      docker pull {{ item }}:v{{ g_new_version }}
+    with_items:
+    - "{{ openshift.node.node_image }}"
+    - "{{ openshift.node.ovs_image }}"
+    - "{{ openshift.common.pod_image }}"
+    - "{{ openshift.common.router_image }}"
+    - "{{ openshift.common.registry_image }}"
+    - "{{ openshift.common.deployer_image }}"
+
+# This is a workaround for authenticated registries
+- name: Download new images
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - name: Pull Images
+    command: >
+      docker pull {{ item }}:v{{ g_new_version }}
+    with_items:
+    - "{{ openshift.master.master_image }}"
+
 ###############################################################################
 # The restart playbook should be run after this playbook completes.
 ###############################################################################
@@ -16,14 +45,18 @@
 ###############################################################################
 - name: Upgrade master
   hosts: oo_masters_to_config
+  handlers:
+  - include: ../../../../../roles/openshift_master/handlers/main.yml
   roles:
   - openshift_facts
   tasks:
   - include: rpm_upgrade.yml component=master
     when: not openshift.common.is_containerized | bool
 
-  - include: containerized_upgrade.yml
-    when: openshift.common.is_containerized | bool
+  - include_vars: ../../../../../roles/openshift_master/vars/main.yml
+
+  - name: Update systemd units
+    include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=g_aos_versions.avail_version
 
 # - name: Upgrade master configuration
 #   openshift_upgrade_config:
@@ -63,6 +96,8 @@
   hosts: oo_nodes_to_config
   roles:
   - openshift_facts
+  handlers:
+  - include: ../../../../../roles/openshift_node/handlers/main.yml
   tasks:
   - include: rpm_upgrade.yml
     vars:
@@ -108,7 +143,6 @@
   vars:
     origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
     ent_reconcile_bindings: true
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   tasks:
   - name: Verifying the correct commandline tools are available
     shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 30e29787a..ea7406e5b 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1167,6 +1167,7 @@ def safe_get_bool(fact):
     """
     return bool(strtobool(str(fact)))
 
 
+# pylint: disable=too-many-statements
 def set_container_facts_if_unset(facts):
     """ Set containerized facts.
@@ -1183,24 +1184,44 @@ def set_container_facts_if_unset(facts):
         node_image = 'openshift3/node'
         ovs_image = 'openshift3/openvswitch'
         etcd_image = 'registry.access.redhat.com/rhel7/etcd'
+        pod_image = 'openshift3/ose-pod'
+        router_image = 'openshift3/ose-haproxy-router'
+        registry_image = 'openshift3/ose-docker-registry'
+        deployer_image = 'openshift3/ose-deployer'
     elif deployment_type == 'atomic-enterprise':
         master_image = 'aep3_beta/aep'
         cli_image = master_image
         node_image = 'aep3_beta/node'
         ovs_image = 'aep3_beta/openvswitch'
         etcd_image = 'registry.access.redhat.com/rhel7/etcd'
+        pod_image = 'aep3_beta/aep-pod'
+        router_image = 'aep3_beta/aep-haproxy-router'
+        registry_image = 'aep3_beta/aep-docker-registry'
+        deployer_image = 'aep3_beta/aep-deployer'
     else:
         master_image = 'openshift/origin'
         cli_image = master_image
         node_image = 'openshift/node'
         ovs_image = 'openshift/openvswitch'
         etcd_image = 'registry.access.redhat.com/rhel7/etcd'
+        pod_image = 'openshift/origin-pod'
+        router_image = 'openshift/origin-haproxy-router'
+        registry_image = 'openshift/origin-docker-registry'
+        deployer_image = 'openshift/origin-deployer'
 
     facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
     if 'is_containerized' not in facts['common']:
         facts['common']['is_containerized'] = facts['common']['is_atomic']
     if 'cli_image' not in facts['common']:
         facts['common']['cli_image'] = cli_image
+    if 'pod_image' not in facts['common']:
+        facts['common']['pod_image'] = pod_image
+    if 'router_image' not in facts['common']:
+        facts['common']['router_image'] = router_image
+    if 'registry_image' not in facts['common']:
+        facts['common']['registry_image'] = registry_image
+    if 'deployer_image' not in facts['common']:
+        facts['common']['deployer_image'] = deployer_image
     if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
         facts['etcd']['etcd_image'] = etcd_image
     if 'master' in facts and 'master_image' not in facts['master']:
@@ -1350,11 +1371,11 @@ class OpenShiftFacts(object):
         facts = set_identity_providers_if_unset(facts)
         facts = set_sdn_facts_if_unset(facts, self.system_facts)
         facts = set_deployment_facts_if_unset(facts)
+        facts = set_container_facts_if_unset(facts)
         facts = set_version_facts_if_unset(facts)
         facts = set_manageiq_facts_if_unset(facts)
         facts = set_aggregate_facts(facts)
         facts = set_etcd_facts_if_unset(facts)
-        facts = set_container_facts_if_unset(facts)
         if not safe_get_bool(facts['common']['is_containerized']):
             facts = set_installed_variant_rpm_facts(facts)
         return dict(openshift=facts)
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index e5b9e4977..70c7ef4e4 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,17 +1,17 @@
 ---
 - name: restart master
   service: name={{ openshift.common.service_type }}-master state=restarted
-  when: (not openshift_master_ha | bool) and (not (master_service_status_changed | default(false) | bool))
+  when: (openshift.master.ha is defined and not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
   notify: Verify API Server
 
 - name: restart master api
   service: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
   notify: Verify API Server
 
 - name: restart master controllers
   service: name={{ openshift.common.service_type }}-master-controllers state=restarted
-  when: (openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 9c3d09d09..e64339ea6 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -97,13 +97,6 @@
     docker pull {{ openshift.master.master_image }}:{{ openshift_version }}
   when: openshift.common.is_containerized | bool
 
-- name: Install Master docker service file
-  template:
-    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
-    src: docker/master.docker.service.j2
-  register: install_result
-  when: openshift.common.is_containerized | bool and not openshift_master_ha | bool
-
 - name: Create openshift.common.data_dir
   file:
     path: "{{ openshift.common.data_dir }}"
@@ -168,54 +161,8 @@
   when: item.kind == 'HTPasswdPasswordIdentityProvider'
   with_items: openshift.master.identity_providers
 
-- name: Init HA Service Info
-  set_fact:
-    ha_suffix: ""
-    ha_svcdir: "/usr/lib/systemd/system"
-
-- name: Set HA Service Info for containerized installs
-  set_fact:
-    ha_suffix: ".docker"
-    ha_svcdir: "/etc/systemd/system"
-  when: openshift.common.is_containerized | bool
-
-# workaround for missing systemd unit files for controllers/api
-- name: Create the systemd unit files
-  template:
-    src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
-    dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
-  when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-  with_items:
-  - api
-  - controllers
-  register: create_unit_files
-
-- command: systemctl daemon-reload
-  when: create_unit_files | changed
-# end workaround for missing systemd unit files
-
-- name: Create the master api service env file
-  template:
-    src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
-    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
-  when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-  notify:
-  - restart master api
-
-- name: Create the master controllers service env file
-  template:
-    src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
-    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
-  when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-  notify:
-  - restart master controllers
-
-- name: Create the master service env file
-  template:
-    src: "atomic-openshift-master.j2"
-    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
-  notify:
-  - restart master
+- name: Install the systemd units
+  include: systemd_units.yml
 
 - name: Create session secrets file
   template:
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
new file mode 100644
index 000000000..a81270bab
--- /dev/null
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -0,0 +1,69 @@
+# This file is included both in the openshift_master role and in the upgrade
+# playbooks.  For that reason the ha_svc variables are use set_fact instead of
+# the vars directory on the role.
+
+- name: Init HA Service Info
+  set_fact:
+    containerized_svc_dir: "/usr/lib/systemd/system"
+    ha_svc_template_path: "native-cluster"
+
+- name: Set HA Service Info for containerized installs
+  set_fact:
+    containerized_svc_dir: "/etc/systemd/system"
+    ha_svc_template_path: "docker-cluster"
+  when: openshift.common.is_containerized | bool
+
+# workaround for missing systemd unit files
+- name: Create the systemd unit files
+  template:
+    src: "docker/master.docker.service.j2"
+    dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service"
+  when: openshift.common.is_containerized | bool and (openshift.master.ha is not defined or not openshift.master.ha | bool)
+  register: create_master_unit_file
+
+- command: systemctl daemon-reload
+  when: create_master_unit_file | changed
+
+- name: Create the ha systemd unit files
+  template:
+    src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
+    dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
+  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+  with_items:
+  - api
+  - controllers
+  register: create_ha_unit_files
+
+- command: systemctl daemon-reload
+  when: create_ha_unit_files | changed
+# end workaround for missing systemd unit files
+
+- name: Create the master api service env file
+  template:
+    src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+  notify:
+  - restart master api
+
+- name: Create the master controllers service env file
+  template:
+    src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+  notify:
+  - restart master controllers
+
+- name: Install Master docker service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
+    src: docker/master.docker.service.j2
+  register: install_result
+  when: openshift.common.is_containerized | bool and openshift.master.ha is defined and not openshift.master.ha | bool
+
+- name: Create the master service env file
+  template:
+    src: "atomic-openshift-master.j2"
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
+  notify:
+  - restart master
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index 6b5a73238..75f08e378 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -8,9 +8,6 @@ openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/sessio
 openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
 openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag) | default(openshift.common.image_tag) | default('') }}"
 
-ha_svc_template_path: "{{ 'docker-cluster' if openshift.common.is_containerized | bool else 'native-cluster' }}"
-ha_svc_svc_dir: "{{ '/etc/systemd/system' if openshift.common.is_containerized | bool else '/usr/lib/systemd/system' }}"
-
 openshift_master_valid_grant_methods:
 - auto
 - prompt
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index ca1e26459..993c8c0cd 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -53,27 +53,8 @@
     docker pull {{ openshift.node.ovs_image }}:{{ openshift_version }}
   when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
 
-- name: Install Node docker service file
-  template:
-    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
-    src: openshift.docker.node.service
-  register: install_node_result
-  when: openshift.common.is_containerized | bool
-
-- name: Create the openvswitch service env file
-  template:
-    src: openvswitch.sysconfig.j2
-    dest: /etc/sysconfig/openvswitch
-  when: openshift.common.is_containerized | bool
-  register: install_ovs_sysconfig
-
-- name: Install OpenvSwitch docker service file
-  template:
-    dest: "/etc/systemd/system/openvswitch.service"
-    src: openvswitch.docker.service
-  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
-  notify:
-  - restart openvswitch
+- name: Install the systemd units
+  include: systemd_units.yml
 
 - name: Reload systemd units
   command: systemctl daemon-reload
@@ -100,22 +81,6 @@
   notify:
   - restart node
 
-- name: Configure Node settings
-  lineinfile:
-    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
-    regexp: "{{ item.regex }}"
-    line: "{{ item.line }}"
-    create: true
-  with_items:
-    - regex: '^OPTIONS='
-      line: "OPTIONS=--loglevel={{ openshift.node.debug_level }}"
-    - regex: '^CONFIG_FILE='
-      line: "CONFIG_FILE={{ openshift_node_config_file }}"
-    - regex: '^IMAGE_VERSION='
-      line: "IMAGE_VERSION={{ openshift_version }}"
-  notify:
-  - restart node
-
 - name: Additional storage plugin configuration
   include: storage_plugins/main.yml
 
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
new file mode 100644
index 000000000..be4b4ed61
--- /dev/null
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -0,0 +1,40 @@
+# This file is included both in the openshift_master role and in the upgrade
+# playbooks.
+
+- name: Install Node docker service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+    src: openshift.docker.node.service
+  register: install_node_result
+  when: openshift.common.is_containerized | bool
+
+- name: Create the openvswitch service env file
+  template:
+    src: openvswitch.sysconfig.j2
+    dest: /etc/sysconfig/openvswitch
+  when: openshift.common.is_containerized | bool
+  register: install_ovs_sysconfig
+
+- name: Install OpenvSwitch docker service file
+  template:
+    dest: "/etc/systemd/system/openvswitch.service"
+    src: openvswitch.docker.service
+  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
+  notify:
+  - restart openvswitch
+
+- name: Configure Node settings
+  lineinfile:
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
+    create: true
+  with_items:
+    - regex: '^OPTIONS='
+      line: "OPTIONS=--loglevel={{ openshift.node.debug_level }}"
+    - regex: '^CONFIG_FILE='
+      line: "CONFIG_FILE={{ openshift_node_config_file }}"
+    - regex: '^IMAGE_VERSION='
+      line: "IMAGE_VERSION={{ openshift_version }}"
+  notify:
+  - restart node