Diffstat (limited to 'roles')
26 files changed, 316 insertions, 17 deletions
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 6e9f3a8bd..e83f72a3d 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -6,7 +6,7 @@
     - cockpit-shell
     - cockpit-bridge
     - "{{ cockpit_plugins }}"
-  when: not openshift.common.is_containerized | bool
+  when: not openshift.common.is_atomic | bool
 
 - name: Enable cockpit-ws
   service:
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index d32f5e48c..cf7bc00a3 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -1,4 +1,8 @@
 ---
+- name: Install openssl
+  action: "{{ ansible_pkg_mgr }} name=openssl state=present"
+  when: not openshift.common.is_atomic | bool
+
 - file:
     path: "{{ item }}"
     state: directory
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index 1c87d562a..32f972f0a 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - fail:
     msg: "fluentd master is not yet supported on atomic hosts"
-  when: openshift.common.is_containerized | bool
+  when: openshift.common.is_atomic | bool
 
 # TODO: Update fluentd install and configuration when packaging is complete
 - name: download and install td-agent
   action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
-  when: not openshift.common.is_containerized | bool
+  when: not openshift.common.is_atomic | bool
 
 - name: Verify fluentd plugin installed
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml
index 8d34c0b19..9fd908687 100644
--- a/roles/fluentd_node/tasks/main.yml
+++ b/roles/fluentd_node/tasks/main.yml
@@ -1,12 +1,12 @@
 ---
 - fail:
     msg: "fluentd node is not yet supported on atomic hosts"
-  when: openshift.common.is_containerized | bool
+  when: openshift.common.is_atomic | bool
 
 # TODO: Update fluentd install and configuration when packaging is complete
 - name: download and install td-agent
   action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
-  when: not openshift.common.is_containerized | bool
+  when: not openshift.common.is_atomic | bool
 
 - name: Verify fluentd plugin installed
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
@@ -55,4 +55,3 @@
     name: 'td-agent'
     state: started
     enabled: yes
-
diff --git a/roles/lib_zabbix/library/zbx_host.py b/roles/lib_zabbix/library/zbx_host.py
index e26c9caf3..560749f07 100644
--- a/roles/lib_zabbix/library/zbx_host.py
+++ b/roles/lib_zabbix/library/zbx_host.py
@@ -63,6 +63,19 @@ def get_template_ids(zapi, template_names):
         template_ids.append({'templateid': content['result'][0]['templateid']})
     return template_ids
 
+def interfaces_equal(zbx_interfaces, user_interfaces):
+    '''
+    compare interfaces from zabbix and interfaces from user
+    '''
+
+    for u_int in user_interfaces:
+        for z_int in zbx_interfaces:
+            for u_key, u_val in u_int.items():
+                if str(z_int[u_key]) != str(u_val):
+                    return False
+
+    return True
+
 def main():
     '''
     Ansible module for zabbix host
@@ -120,8 +133,9 @@ def main():
             'dns': '',                      # dns for host
             'port': '10050',                # port for interface? 10050
            }]
+    hostgroup_names = list(set(module.params['hostgroup_names']))
     params = {'host': hname,
-              'groups': get_group_ids(zapi, module.params['hostgroup_names']),
+              'groups': get_group_ids(zapi, hostgroup_names),
               'templates': get_template_ids(zapi, module.params['template_names']),
               'interfaces': ifs,
              }
@@ -140,6 +154,11 @@ def main():
             if zab_results['parentTemplates'] != value:
                 differences[key] = value
+
+            elif key == "interfaces":
+                if not interfaces_equal(zab_results[key], value):
+                    differences[key] = value
+
             elif zab_results[key] != value and zab_results[key] != str(value):
                 differences[key] = value
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 0a0cf1fae..e2c51a903 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -1,6 +1,7 @@
 ---
 - name: nickhammond.logrotate | Install logrotate
   action: "{{ ansible_pkg_mgr }} name=logrotate state=present"
+  when: not openshift.common.is_atomic | bool
 
 - name: nickhammond.logrotate | Setup logrotate.d scripts
   template:
diff --git a/roles/nuage_master/README.md b/roles/nuage_master/README.md
new file mode 100644
index 000000000..de101dd19
--- /dev/null
+++ b/roles/nuage_master/README.md
@@ -0,0 +1,8 @@
+Nuage Master
+============
+Setup Nuage Kubernetes Monitor on the Master node
+
+
+Requirements
+------------
+This role assumes it has been deployed on RHEL/Fedora
diff --git a/roles/nuage_master/files/serviceaccount.sh b/roles/nuage_master/files/serviceaccount.sh
new file mode 100644
index 000000000..f6fdb8a8d
--- /dev/null
+++ b/roles/nuage_master/files/serviceaccount.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Parse CLI options
+for i in "$@"; do
+    case $i in
+        --master-cert-dir=*)
+            MASTER_DIR="${i#*=}"
+            CA_CERT=${MASTER_DIR}/ca.crt
+            CA_KEY=${MASTER_DIR}/ca.key
+            CA_SERIAL=${MASTER_DIR}/ca.serial.txt
+            ADMIN_FILE=${MASTER_DIR}/admin.kubeconfig
+            ;;
+        --server=*)
+            SERVER="${i#*=}"
+            ;;
+        --output-cert-dir=*)
+            OUTDIR="${i#*=}"
+            CONFIG_FILE=${OUTDIR}/nuage.kubeconfig
+            ;;
+    esac
+done
+
+# If any are missing, print the usage and exit
+if [ -z $SERVER ] || [ -z $OUTDIR ] || [ -z $MASTER_DIR ]; then
+    echo "Invalid syntax: $@"
+    echo "Usage:"
+    echo "  $0 --server=<address>:<port> --output-cert-dir=/path/to/output/dir/ --master-cert-dir=/path/to/master/"
+    echo "--master-cert-dir: Directory where the master's configuration is held"
+    echo "--server: Address of Kubernetes API server (default port is 8443)"
+    echo "--output-cert-dir: Directory to put artifacts in"
+    echo ""
+    echo "All options are required"
+    exit 1
+fi
+
+# Login as admin so that we can create the service account
+oc login -u system:admin --config=$ADMIN_FILE || exit 1
+oc project default --config=$ADMIN_FILE
+
+ACCOUNT_CONFIG='
+{
+    "apiVersion": "v1",
+    "kind": "ServiceAccount",
+    "metadata": {
+        "name": "nuage"
+    }
+}
+'
+
+# Create the account with the included info
+echo $ACCOUNT_CONFIG|oc create --config=$ADMIN_FILE -f -
+
+# Add the cluser-reader role, which allows this service account read access to
+# everything in the cluster except secrets
+oadm policy add-cluster-role-to-user cluster-reader system:serviceaccounts:default:nuage --config=$ADMIN_FILE
+
+# Generate certificates and a kubeconfig for the service account
+oadm create-api-client-config --certificate-authority=${CA_CERT} --client-dir=${OUTDIR} --signer-cert=${CA_CERT} --signer-key=${CA_KEY} --signer-serial=${CA_SERIAL} --user=system:serviceaccounts:default:nuage --master=${SERVER} --public-master=${SERVER} --basename='nuage'
+
+# Verify the finalized kubeconfig
+if ! [ $(oc whoami --config=$CONFIG_FILE) == 'system:serviceaccounts:default:nuage' ]; then
+    echo "Service account creation failed!"
+    exit 1
+fi
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
new file mode 100644
index 000000000..635d8a419
--- /dev/null
+++ b/roles/nuage_master/handlers/main.yaml
@@ -0,0 +1,18 @@
+---
+- name: restart nuagekubemon
+  sudo: true
+  service: name=nuagekubemon state=restarted
+
+- name: restart master
+  service: name={{ openshift.common.service_type }}-master state=restarted
+  when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
+
+- name: restart master api
+  service: name={{ openshift.common.service_type }}-master-api state=restarted
+  when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+
+# TODO: need to fix up ignore_errors here
+- name: restart master controllers
+  service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+  ignore_errors: yes
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
new file mode 100644
index 000000000..a7baadc76
--- /dev/null
+++ b/roles/nuage_master/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+- name: Create directory /usr/share/nuagekubemon
+  sudo: true
+  file: path=/usr/share/nuagekubemon state=directory
+
+- name: Create the log directory
+  sudo: true
+  file: path={{ nuagekubemon_log_dir }} state=directory
+
+- name: Install Nuage Kubemon
+  sudo: true
+  yum: name={{ nuage_kubemon_rpm }} state=present
+
+- name: Run the service account creation script
+  sudo: true
+  script: serviceaccount.sh --server={{ openshift.master.api_url }} --output-cert-dir={{ cert_output_dir }} --master-cert-dir={{ openshift_master_config_dir }}
+
+- name: Download the certs and keys
+  sudo: true
+  fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
+  with_items:
+    - ca.crt
+    - nuage.crt
+    - nuage.key
+    - nuage.kubeconfig
+
+- name: Create nuagekubemon.yaml
+  sudo: true
+  template: src=nuagekubemon.j2 dest=/usr/share/nuagekubemon/nuagekubemon.yaml owner=root mode=0644
+  notify:
+    - restart master
+    - restart master api
+    - restart master controllers
+    - restart nuagekubemon
diff --git a/roles/nuage_master/templates/nuagekubemon.j2 b/roles/nuage_master/templates/nuagekubemon.j2
new file mode 100644
index 000000000..fb586bcee
--- /dev/null
+++ b/roles/nuage_master/templates/nuagekubemon.j2
@@ -0,0 +1,19 @@
+# .kubeconfig that includes the nuage service account
+kubeConfig: {{ kube_config }}
+# name of the nuage service account, or another account with 'cluster-reader'
+# permissions
+# Openshift master config file
+openshiftMasterConfig: {{ master_config_yaml }}
+# URL of the VSD Architect
+vsdApiUrl: {{ vsd_api_url }}
+# API version to query against. Usually "v3_2"
+vspVersion: {{ vsp_version }}
+# File containing a VSP license to install. Only necessary if no license has
+# been installed on the VSD Architect before, only valid for standalone vsd install
+# licenseFile: "/path/to/base_vsp_license.txt"
+# Name of the enterprise in which pods will reside
+enterpriseName: {{ enterprise }}
+# Name of the domain in which pods will reside
+domainName: {{ domain }}
+# Location where logs should be saved
+log_dir: {{ nuagekubemon_log_dir }}
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
new file mode 100644
index 000000000..db901fea6
--- /dev/null
+++ b/roles/nuage_master/vars/main.yaml
@@ -0,0 +1,7 @@
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
+admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+cert_output_dir: /usr/share/nuagekubemon
+kube_config: /usr/share/nuagekubemon/nuage.kubeconfig
+kubemon_yaml: /usr/share/nuagekubemon/nuagekubemon.yaml
+master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml"
diff --git a/roles/nuage_node/README.md b/roles/nuage_node/README.md
new file mode 100644
index 000000000..02a3cbc77
--- /dev/null
+++ b/roles/nuage_node/README.md
@@ -0,0 +1,9 @@
+Nuage Node
+==========
+
+Setup Nuage VRS (Virtual Routing Switching) on the Openshift Node
+
+Requirements
+------------
+
+This role assumes it has been deployed on RHEL/Fedora
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
new file mode 100644
index 000000000..d82d4b67b
--- /dev/null
+++ b/roles/nuage_node/handlers/main.yaml
@@ -0,0 +1,4 @@
+---
+- name: restart vrs
+  sudo: true
+  service: name=openvswitch state=restarted
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
new file mode 100644
index 000000000..e0117bf71
--- /dev/null
+++ b/roles/nuage_node/tasks/main.yaml
@@ -0,0 +1,37 @@
+---
+- name: Install Nuage VRS
+  sudo: true
+  yum: name={{ vrs_rpm }} state=present
+
+- name: Set the uplink interface
+  sudo: true
+  lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
+
+- name: Set the Active Controller
+  sudo: true
+  lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
+
+- name: Set the Standby Controller
+  sudo: true
+  lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
+  when: vsc_standby_ip is defined
+
+- name: Install plugin rpm
+  sudo: true
+  yum: name={{ plugin_rpm }} state=present
+
+- name: Copy the certificates and keys
+  sudo: true
+  copy: src="/tmp/{{ item }}" dest="{{ vsp_k8s_dir }}/{{ item }}"
+  with_items:
+    - ca.crt
+    - nuage.crt
+    - nuage.key
+    - nuage.kubeconfig
+
+- name: Set the vsp-k8s.yaml
+  sudo: true
+  template: src=vsp-k8s.j2 dest={{ vsp_k8s_yaml }} owner=root mode=0644
+  notify:
+    - restart vrs
+    - restart node
diff --git a/roles/nuage_node/templates/vsp-k8s.j2 b/roles/nuage_node/templates/vsp-k8s.j2
new file mode 100644
index 000000000..98d6c3a9c
--- /dev/null
+++ b/roles/nuage_node/templates/vsp-k8s.j2
@@ -0,0 +1,14 @@
+clientCert: {{ client_cert }}
+# The key to the certificate in clientCert above
+clientKey: {{ client_key }}
+# The certificate authority's certificate for the local kubelet. Usually the
+# same as the CA cert used to create the client Cert/Key pair.
+CACert: {{ ca_cert }}
+# Name of the enterprise in which pods will reside
+enterpriseName: {{ enterprise }}
+# Name of the domain in which pods will reside
+domainName: {{ domain }}
+# IP address and port number of master API server
+masterApiServer: {{ api_server }}
+# Bridge name for the docker bridge
+dockerBridgeName: {{ docker_bridge }}
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
new file mode 100644
index 000000000..a6b7cf997
--- /dev/null
+++ b/roles/nuage_node/vars/main.yaml
@@ -0,0 +1,9 @@
+---
+vrs_config: /etc/default/openvswitch
+vsp_k8s_dir: /usr/share/vsp-k8s
+vsp_k8s_yaml: "{{ vsp_k8s_dir }}/vsp-k8s.yaml"
+client_cert: "{{ vsp_k8s_dir }}/nuage.crt"
+client_key: "{{ vsp_k8s_dir }}/nuage.key"
+ca_cert: "{{ vsp_k8s_dir }}/ca.crt"
+api_server: "{{ openshift_node_master_api_url }}"
+docker_bridge: "docker0"
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 2b53c9b8e..a6b6b1925 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -6,7 +6,7 @@
     cli_image: "{{ osm_image | default(None) }}"
 
 - name: Install clients
-  yum: pkg={{ openshift.common.service_type }}-clients state=installed
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-clients state=present"
   when: not openshift.common.is_containerized | bool
 
 - name: Pull CLI Image
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 3a2ccb59a..ff8c3b50f 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -4,6 +4,14 @@
   when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool
 
 - fail:
+    msg: Nuage sdn can not be used with openshift sdn
+  when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+- fail:
+    msg: Nuage sdn can not be used with flannel
+  when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+- fail:
     msg: openshift_hostname must be 64 characters or less
   when: openshift_hostname is defined and openshift_hostname | length > 64
 
@@ -23,7 +31,9 @@
     deployment_type: "{{ openshift_deployment_type }}"
    use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
    use_flannel: "{{ openshift_use_flannel | default(None) }}"
+    use_nuage: "{{ openshift_use_nuage | default(None) }}"
    use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
+    data_dir: "{{ openshift_data_dir | default(None) }}"
 
 - name: Install the base package for versioning
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') }} state=present"
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 50816d319..b163f8aae 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -5,3 +5,4 @@
 # chains with the public zone (or the zone associated with the correct
 # interfaces)
 os_firewall_use_firewalld: False
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index af819e218..40e54d706 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -338,6 +338,23 @@ def set_flannel_facts_if_unset(facts):
         facts['common']['use_flannel'] = use_flannel
     return facts
 
+def set_nuage_facts_if_unset(facts):
+    """ Set nuage facts if not already present in facts dict
+            dict: the facts dict updated with the nuage facts if
+            missing
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with the nuage
+            facts if they were not already present
+
+    """
+    if 'common' in facts:
+        if 'use_nuage' not in facts['common']:
+            use_nuage = False
+            facts['common']['use_nuage'] = use_nuage
+    return facts
+
 def set_node_schedulability(facts):
     """ Set schedulable facts if not already present in facts dict
     Args:
@@ -1128,6 +1145,7 @@ class OpenShiftFacts(object):
         facts = set_project_cfg_facts_if_unset(facts)
         facts = set_fluentd_facts_if_unset(facts)
         facts = set_flannel_facts_if_unset(facts)
+        facts = set_nuage_facts_if_unset(facts)
         facts = set_node_schedulability(facts)
         facts = set_master_selectors(facts)
         facts = set_metrics_facts_if_unset(facts)
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index e40a1b329..0dbac1b54 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,11 +1,9 @@
 ---
-- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1
-  assert:
-    that:
-    - ansible_version | version_compare('1.8.0', 'ge')
-    - ansible_version | version_compare('1.9.0', 'ne')
-    - ansible_version | version_compare('1.9.0.1', 'ne')
-
+- name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
+  fail:
+    msg: "Unsupported ansible version: {{ ansible_version }} found"
+  when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
+
 - name: Detecting Operating System
   shell: ls /run/ostree-booted
   ignore_errors: yes
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 9a7ae96f0..1eeab46fe 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -117,7 +117,7 @@ masterPublicURL: {{ openshift.master.public_api_url }}
 networkConfig:
   clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
   hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}
   networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
 {% endif %}
 # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index cbe811f83..44065f4bd 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -20,7 +20,7 @@ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
 # deprecates networkPluginName above. The two should match.
 networkConfig:
   mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}
   networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
 {% endif %}
 {% if openshift.node.set_node_ip | bool %}
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index a0ba8d104..12ea36c8b 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -98,6 +98,18 @@ g_template_openshift_master:
     applications:
     - Openshift Master
 
+  - key: openshift.master.skydns.port.open
+    description: State of the SkyDNS port open and listening
+    type: int
+    applications:
+    - Openshift Master
+
+  - key: openshift.master.skydns.query
+    description: SkyDNS can be queried or not
+    type: int
+    applications:
+    - Openshift Master
+
   - key: openshift.master.etcd.create.success
     description: Show number of successful create actions
     type: int
@@ -305,6 +317,20 @@ g_template_openshift_master:
     - 'Openshift Master process not running on {HOST.NAME}'
     priority: high
 
+  - name: 'SkyDNS port not listening on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.skydns.port.open.max(#3)}<1'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+    dependencies:
+    - 'Openshift Master process not running on {HOST.NAME}'
+    priority: high
+
+  - name: 'SkyDNS query failed on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.skydns.query.max(#3)}<1'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+    dependencies:
+    - 'Openshift Master API health check is failing on {HOST.NAME}'
+    priority: high
+
   zgraphs:
   - name: Openshift Master API Server Latency Pods LIST Quantiles
     width: 900
diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
index 31f7d4caa..ac950b4e5 100644
--- a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
+++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
@@ -50,6 +50,7 @@ ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }}
            -e OSO_ENVIRONMENT={{ oo_environment }} \
            -e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_host-type'] }} \
            -e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }} \
+           -e OSO_MASTER_HA={{ osohm_master_ha }} \
            -v /etc/localtime:/etc/localtime \
            -v /sys:/sys:ro \
            -v /sys/fs/selinux \
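A note on the zbx_host.py change above: interfaces_equal() treats the caller-supplied interface list as the reference and string-compares each of its keys against the interfaces Zabbix returns, so fields that exist only on the Zabbix side (such as interfaceid) never register as a difference. A minimal standalone sketch of that behaviour, using hypothetical sample data rather than anything from the commit:

    def interfaces_equal(zbx_interfaces, user_interfaces):
        # Return False as soon as any user-supplied key differs from the Zabbix value.
        for u_int in user_interfaces:
            for z_int in zbx_interfaces:
                for u_key, u_val in u_int.items():
                    # Zabbix returns most fields as strings, so compare everything as strings.
                    if str(z_int[u_key]) != str(u_val):
                        return False
        return True

    # Hypothetical data: Zabbix reports an extra 'interfaceid', but every
    # user-supplied key matches, so no difference is recorded.
    zbx = [{'interfaceid': '42', 'type': '1', 'main': '1', 'useip': '1',
            'ip': '10.0.0.5', 'dns': '', 'port': '10050'}]
    user = [{'type': 1, 'main': 1, 'useip': 1,
             'ip': '10.0.0.5', 'dns': '', 'port': '10050'}]
    print(interfaces_equal(zbx, user))                  # True  -> no update needed
    print(interfaces_equal(zbx, [{'port': '10051'}]))   # False -> host gets updated

This is presumably why the diff adds a dedicated interfaces branch to the difference loop: a raw comparison of the two interface lists would flag the extra Zabbix-side fields as a change on every run.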