-rw-r--r--  filter_plugins/oo_filters.py | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 3
-rw-r--r--  roles/cockpit/meta/main.yml | 2
-rw-r--r--  roles/cockpit/tasks/main.yml | 2
-rw-r--r--  roles/dns/README.md | 2
-rw-r--r--  roles/dns/handlers/main.yml | 3
-rw-r--r--  roles/dns/meta/main.yml | 1
-rw-r--r--  roles/dns/tasks/main.yml | 9
-rw-r--r--  roles/docker/handlers/main.yml | 5
-rw-r--r--  roles/docker/tasks/udev_workaround.yml | 8
-rw-r--r--  roles/etcd/README.md | 3
-rw-r--r--  roles/etcd/handlers/main.yml | 2
-rw-r--r--  roles/etcd/meta/main.yml | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 32
-rw-r--r--  roles/flannel/README.md | 3
-rw-r--r--  roles/flannel/handlers/main.yml | 4
-rw-r--r--  roles/flannel/meta/main.yml | 2
-rw-r--r--  roles/flannel/tasks/main.yml | 2
-rw-r--r--  roles/kube_nfs_volumes/README.md | 2
-rw-r--r--  roles/kube_nfs_volumes/handlers/main.yml | 2
-rw-r--r--  roles/kube_nfs_volumes/meta/main.yml | 2
-rw-r--r--  roles/kube_nfs_volumes/tasks/nfs.yml | 10
-rw-r--r--  roles/nuage_master/README.md | 4
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 18
-rw-r--r--  roles/nuage_master/meta/main.yml | 6
-rw-r--r--  roles/nuage_node/README.md | 3
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 4
-rw-r--r--  roles/nuage_node/meta/main.yml | 6
-rw-r--r--  roles/openshift_loadbalancer/README.md | 2
-rw-r--r--  roles/openshift_loadbalancer/handlers/main.yml | 2
-rw-r--r--  roles/openshift_loadbalancer/meta/main.yml | 2
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml | 8
-rw-r--r--  roles/openshift_master/README.md | 3
-rw-r--r--  roles/openshift_master/handlers/main.yml | 6
-rw-r--r--  roles/openshift_master/meta/main.yml | 2
-rw-r--r--  roles/openshift_master/tasks/main.yml | 64
-rw-r--r--  roles/openshift_master_cluster/README.md | 2
-rw-r--r--  roles/openshift_master_cluster/meta/main.yml | 2
-rw-r--r--  roles/openshift_metrics/README.md | 6
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 6
-rw-r--r--  roles/openshift_metrics/meta/main.yaml | 16
-rw-r--r--  roles/openshift_node/README.md | 8
-rw-r--r--  roles/openshift_node/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node/tasks/main.yml | 69
-rw-r--r--  roles/openshift_node_certificates/README.md | 2
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node_certificates/meta/main.yml | 2
-rw-r--r--  roles/openshift_node_dnsmasq/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node_dnsmasq/meta/main.yml | 2
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/main.yml | 12
-rw-r--r--  roles/openshift_storage_nfs/README.md | 6
-rw-r--r--  roles/openshift_storage_nfs/handlers/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 18
-rw-r--r--  roles/openshift_storage_nfs_lvm/README.md | 5
-rw-r--r--  roles/openshift_storage_nfs_lvm/handlers/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 15
58 files changed, 244 insertions, 190 deletions
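
The dominant change in this patch is the switch from Ansible's generic service module to the systemd module (added in Ansible 2.2, which is why the roles' min_ansible_version is bumped throughout): daemon_reload: yes on the systemd task replaces the separate register/systemctl daemon-reload bookkeeping tasks that are deleted below. A minimal before/after sketch of that pattern, using illustrative unit and variable names rather than any task from this patch:

    # Before: generic service module plus a hand-rolled daemon-reload task
    - name: Enable example service
      service:
        name: example.service
        state: started
        enabled: yes

    - name: reload systemd
      command: systemctl daemon-reload
      when: unit_file_result | changed

    # After (Ansible >= 2.2): the systemd module folds the reload into the task
    - name: Enable example service
      systemd:
        name: example.service
        state: started
        enabled: yes
        daemon_reload: yes
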
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 38bc3ad6b..997634777 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -10,7 +10,14 @@ from collections import Mapping
from distutils.util import strtobool
from distutils.version import LooseVersion
from operator import itemgetter
-import OpenSSL.crypto
+
+HAS_OPENSSL=False
+try:
+ import OpenSSL.crypto
+ HAS_OPENSSL=True
+except ImportError:
+ pass
+
import os
import pdb
import pkg_resources
@@ -516,6 +523,9 @@ class FilterModule(object):
if not isinstance(internal_hostnames, list):
raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+ if not HAS_OPENSSL:
+ raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
+
for certificate in certificates:
if 'names' in certificate.keys():
continue
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index cce844403..192799376 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -14,6 +14,9 @@
connection: local
become: no
tasks:
+ - fail:
+ msg: 'The etcd upgrade playbook does not support upgrading embedded etcd, simply run the normal playbooks and etcd will be upgraded when your master is updated.'
+ when: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- name: Evaluate etcd_hosts_to_upgrade
add_host:
name: "{{ item }}"
diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml
index 43047902d..0f507e75e 100644
--- a/roles/cockpit/meta/main.yml
+++ b/roles/cockpit/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: Deploy and Enable cockpit-ws plus optional plugins
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.7
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 1975b92e6..bddad778f 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -10,7 +10,7 @@
when: not openshift.common.is_containerized | bool
- name: Enable cockpit-ws
- service:
+ systemd:
name: cockpit.socket
enabled: true
state: started
diff --git a/roles/dns/README.md b/roles/dns/README.md
index 7e0140772..9a88ce97c 100644
--- a/roles/dns/README.md
+++ b/roles/dns/README.md
@@ -6,7 +6,7 @@ Configure a DNS server serving IPs of all the nodes of the cluster
Requirements
------------
-None
+Ansible 2.2
Role Variables
--------------
diff --git a/roles/dns/handlers/main.yml b/roles/dns/handlers/main.yml
index ef101785e..61fd7a10e 100644
--- a/roles/dns/handlers/main.yml
+++ b/roles/dns/handlers/main.yml
@@ -1,4 +1,5 @@
+---
- name: restart bind
- service:
+ systemd:
name: named
state: restarted
diff --git a/roles/dns/meta/main.yml b/roles/dns/meta/main.yml
index 048274c49..64d56114e 100644
--- a/roles/dns/meta/main.yml
+++ b/roles/dns/meta/main.yml
@@ -4,5 +4,6 @@ galaxy_info:
description: Deploy and configure a DNS server
company: Amadeus SAS
license: ASL 2.0
+ min_ansible_version: 2.2
dependencies:
- { role: openshift_facts }
diff --git a/roles/dns/tasks/main.yml b/roles/dns/tasks/main.yml
index 2abe0d9dd..c5ab53b4d 100644
--- a/roles/dns/tasks/main.yml
+++ b/roles/dns/tasks/main.yml
@@ -11,7 +11,6 @@
template:
dest: "/tmp/dockerbuild/Dockerfile"
src: Dockerfile
- register: install_result
when: openshift.common.is_containerized | bool
- name: Build Bind image
@@ -22,13 +21,8 @@
template:
dest: "/etc/systemd/system/named.service"
src: named.service.j2
- register: install_result
when: openshift.common.is_containerized | bool
-- name: reload systemd
- command: /usr/bin/systemctl --system daemon-reload
- when: openshift.common.is_containerized | bool and install_result | changed
-
- name: Create bind zone dir
file: path=/var/named state=directory
when: openshift.common.is_containerized | bool
@@ -45,7 +39,8 @@
notify: restart bind
- name: Enable Bind
- service:
+ systemd:
name: named
state: started
enabled: yes
+ daemon_reload: yes
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index aff905bc8..9ccb306fc 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -1,12 +1,13 @@
---
- name: restart docker
- service:
+ systemd:
name: docker
state: restarted
when: not docker_service_status_changed | default(false) | bool
- name: restart udev
- service:
+ systemd:
name: systemd-udevd
state: restarted
+ daemon_reload: yes
diff --git a/roles/docker/tasks/udev_workaround.yml b/roles/docker/tasks/udev_workaround.yml
index aa7af0cb3..257c3123d 100644
--- a/roles/docker/tasks/udev_workaround.yml
+++ b/roles/docker/tasks/udev_workaround.yml
@@ -21,10 +21,4 @@
owner: root
mode: "0644"
notify:
- - restart udev
- register: udevw_override_conf
-
-- name: reload systemd config files
- command: systemctl daemon-reload
- when: udevw_override_conf | changed
-
+ - restart udev
diff --git a/roles/etcd/README.md b/roles/etcd/README.md
index 329a926c0..c936dbabc 100644
--- a/roles/etcd/README.md
+++ b/roles/etcd/README.md
@@ -6,7 +6,8 @@ Configures an etcd cluster for an arbitrary number of hosts
Requirements
------------
-This role assumes it's being deployed on a RHEL/Fedora based host with package
+* Ansible 2.2
+* This role assumes it's being deployed on a RHEL/Fedora based host with package
named 'etcd' available via yum or dnf (conditionally).
Role Variables
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index e00e1cac4..95076b19e 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -1,5 +1,5 @@
---
- name: restart etcd
- service: name={{ etcd_service }} state=restarted
+ systemd: name={{ etcd_service }} state=restarted
when: not (etcd_service_status_changed | default(false) | bool)
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index cfd72dfbc..532f9e313 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -7,7 +7,7 @@ galaxy_info:
description: etcd management
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 2.1
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 7b61e9b73..41f25be70 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -20,36 +20,25 @@
template:
dest: "/etc/systemd/system/etcd_container.service"
src: etcd.docker.service
- register: install_etcd_result
when: etcd_is_containerized | bool
-- name: Ensure etcd datadir exists
- when: etcd_is_containerized | bool
+- name: Ensure etcd datadir exists when containerized
file:
path: "{{ etcd_data_dir }}"
state: directory
mode: 0700
-
-- name: Check for etcd service presence
- command: systemctl show etcd.service
- register: etcd_show
- changed_when: false
- failed_when: false
+ when: etcd_is_containerized | bool
- name: Disable system etcd when containerized
- when: etcd_is_containerized | bool and etcd_show.rc == 0 and 'LoadState=not-found' not in etcd_show.stdout
- service:
+ systemd:
name: etcd
state: stopped
enabled: no
-
-- name: Mask system etcd when containerized
- when: etcd_is_containerized | bool and etcd_show.rc == 0 and 'LoadState=not-found' not in etcd_show.stdout
- command: systemctl mask etcd
-
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: etcd_is_containerized | bool and ( install_etcd_result | changed )
+ masked: yes
+ daemon_reload: yes
+ when: etcd_is_containerized | bool
+ register: task_result
+ failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
- name: Validate permissions on the config dir
file:
@@ -68,7 +57,7 @@
- restart etcd
- name: Enable etcd
- service:
+ systemd:
name: "{{ etcd_service }}"
state: started
enabled: yes
@@ -77,5 +66,6 @@
- include: etcdctl.yml
when: openshift_etcd_etcdctl_profile | default(true) | bool
-- set_fact:
+- name: Set fact etcd_service_status_changed
+ set_fact:
etcd_service_status_changed: "{{ start_result | changed }}"
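
Two of the tasks removed above — the systemctl show etcd.service presence check and the separate systemctl mask etcd command — are folded into the single systemd task: masked: yes handles the masking, and the failed_when expression tolerates the "Could not find the requested service" style failure the module reports when the unit was never installed. A standalone sketch of that tolerate-a-missing-unit pattern, with an illustrative unit name:

    - name: Stop, disable, and mask a unit that may not be installed
      systemd:
        name: example.service
        state: stopped
        enabled: no
        masked: yes
      register: task_result
      # Treat only real failures as failures; a missing unit reports "Could not ..."
      failed_when: "task_result | failed and 'could not' not in task_result.msg | lower"
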
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index 84e2c5c49..0c7347603 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -6,7 +6,8 @@ Configure flannel on openshift nodes
Requirements
------------
-This role assumes it's being deployed on a RHEL/Fedora based host with package
+* Ansible 2.2
+* This role assumes it's being deployed on a RHEL/Fedora based host with package
named 'flannel' available via yum or dnf (conditionally), in version superior
to 0.3.
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 981ea5c7a..94d1d18fb 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -1,8 +1,8 @@
---
- name: restart flanneld
become: yes
- service: name=flanneld state=restarted
+ systemd: name=flanneld state=restarted
- name: restart docker
become: yes
- service: name=docker state=restarted
+ systemd: name=docker state=restarted
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
index 616ae61d2..35f825586 100644
--- a/roles/flannel/meta/main.yml
+++ b/roles/flannel/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: flannel management
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 2.1
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index a51455bae..3a8945a82 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -27,7 +27,7 @@
- name: Enable flanneld
become: yes
- service:
+ systemd:
name: flanneld
state: started
enabled: yes
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
index dd91ad8b1..8cf7c0cd4 100644
--- a/roles/kube_nfs_volumes/README.md
+++ b/roles/kube_nfs_volumes/README.md
@@ -11,8 +11,8 @@ system) on the disks!
## Requirements
+* Ansible 2.2
* Running Kubernetes with NFS persistent volume support (on a remote machine).
-
* Works only on RHEL/Fedora-like distros.
## Role Variables
diff --git a/roles/kube_nfs_volumes/handlers/main.yml b/roles/kube_nfs_volumes/handlers/main.yml
index 52f3ceffe..9ce8b783d 100644
--- a/roles/kube_nfs_volumes/handlers/main.yml
+++ b/roles/kube_nfs_volumes/handlers/main.yml
@@ -1,3 +1,3 @@
---
- name: restart nfs
- service: name=nfs-server state=restarted
+ systemd: name=nfs-server state=restarted
diff --git a/roles/kube_nfs_volumes/meta/main.yml b/roles/kube_nfs_volumes/meta/main.yml
index dc4ccdfee..be6ca6b88 100644
--- a/roles/kube_nfs_volumes/meta/main.yml
+++ b/roles/kube_nfs_volumes/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: Partition disks and use them as Kubernetes NFS physical volumes.
company: Red Hat, Inc.
license: license (Apache)
- min_ansible_version: 1.4
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml
index ebd3d349a..9eeff9260 100644
--- a/roles/kube_nfs_volumes/tasks/nfs.yml
+++ b/roles/kube_nfs_volumes/tasks/nfs.yml
@@ -4,10 +4,16 @@
when: not openshift.common.is_containerized | bool
- name: Start rpcbind on Fedora/Red Hat
- service: name=rpcbind state=started enabled=yes
+ systemd:
+ name: rpcbind
+ state: started
+ enabled: yes
- name: Start nfs on Fedora/Red Hat
- service: name=nfs-server state=started enabled=yes
+ systemd:
+ name: nfs-server
+ state: started
+ enabled: yes
- name: Export the directories
lineinfile: dest=/etc/exports
diff --git a/roles/nuage_master/README.md b/roles/nuage_master/README.md
index de101dd19..0f1f6f2b1 100644
--- a/roles/nuage_master/README.md
+++ b/roles/nuage_master/README.md
@@ -5,4 +5,6 @@ Setup Nuage Kubernetes Monitor on the Master node
Requirements
------------
-This role assumes it has been deployed on RHEL/Fedora
+
+* Ansible 2.2
+* This role assumes it has been deployed on RHEL/Fedora
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 56224cf82..162aaae1a 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,18 +1,24 @@
---
- name: restart nuage-openshift-monitor
become: yes
- service: name=nuage-openshift-monitor state=restarted
+ systemd: name=nuage-openshift-monitor state=restarted
- name: restart master
- service: name={{ openshift.common.service_type }}-master state=restarted
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
- name: restart master api
- service: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: >
+ (openshift_master_ha | bool) and
+ (not master_api_service_status_changed | default(false)) and
+ openshift.master.cluster_method == 'native'
# TODO: need to fix up ignore_errors here
- name: restart master controllers
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
- when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: >
+ (openshift_master_ha | bool) and
+ (not master_controllers_service_status_changed | default(false)) and
+ openshift.master.cluster_method == 'native'
ignore_errors: yes
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index 51b89fbf6..b2a47ef71 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -1,10 +1,10 @@
---
galaxy_info:
- author: Vishal Patil
+ author: Vishal Patil
description:
company: Nuage Networks
license: Apache License, Version 2.0
- min_ansible_version: 1.8
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
@@ -18,5 +18,5 @@ dependencies:
- role: openshift_etcd_client_certificates
- role: os_firewall
os_firewall_allow:
- - service: openshift-monitor
+ - service: openshift-monitor
port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/nuage_node/README.md b/roles/nuage_node/README.md
index 02a3cbc77..75a75ca6b 100644
--- a/roles/nuage_node/README.md
+++ b/roles/nuage_node/README.md
@@ -6,4 +6,5 @@ Setup Nuage VRS (Virtual Routing Switching) on the Openshift Node
Requirements
------------
-This role assumes it has been deployed on RHEL/Fedora
+* Ansible 2.2
+* This role assumes it has been deployed on RHEL/Fedora
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index fd06d9025..8384856ff 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,11 +1,11 @@
---
- name: restart vrs
become: yes
- service: name=openvswitch state=restarted
+ systemd: name=openvswitch state=restarted
- name: restart node
become: yes
- service: name={{ openshift.common.service_type }}-node state=restarted
+ systemd: name={{ openshift.common.service_type }}-node state=restarted
- name: save iptable rules
become: yes
diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml
index a6fbcba61..f96318611 100644
--- a/roles/nuage_node/meta/main.yml
+++ b/roles/nuage_node/meta/main.yml
@@ -1,10 +1,10 @@
---
galaxy_info:
- author: Vishal Patil
+ author: Vishal Patil
description:
company: Nuage Networks
license: Apache License, Version 2.0
- min_ansible_version: 1.8
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
@@ -17,7 +17,7 @@ dependencies:
- role: nuage_ca
- role: os_firewall
os_firewall_allow:
- - service: vxlan
+ - service: vxlan
port: 4789/udp
- service: nuage-monitor
port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/openshift_loadbalancer/README.md b/roles/openshift_loadbalancer/README.md
index 03e837e46..bea4c509b 100644
--- a/roles/openshift_loadbalancer/README.md
+++ b/roles/openshift_loadbalancer/README.md
@@ -6,6 +6,8 @@ OpenShift HaProxy Loadbalancer Configuration
Requirements
------------
+* Ansible 2.2
+
This role is intended to be applied to the [lb] host group which is
separate from OpenShift infrastructure components.
diff --git a/roles/openshift_loadbalancer/handlers/main.yml b/roles/openshift_loadbalancer/handlers/main.yml
index 5b8691b26..3bf052460 100644
--- a/roles/openshift_loadbalancer/handlers/main.yml
+++ b/roles/openshift_loadbalancer/handlers/main.yml
@@ -1,6 +1,6 @@
---
- name: restart haproxy
- service:
+ systemd:
name: haproxy
state: restarted
when: not (haproxy_start_result_changed | default(false) | bool)
diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml
index 0b29df2a0..0dffb545f 100644
--- a/roles/openshift_loadbalancer/meta/main.yml
+++ b/roles/openshift_loadbalancer/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: OpenShift haproxy loadbalancer
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.9
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 1d2804279..400f80715 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -27,11 +27,6 @@
option: LimitNOFILE
value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"
notify: restart haproxy
- register: nofile_limit_result
-
-- name: Reload systemd if needed
- command: systemctl daemon-reload
- when: nofile_limit_result | changed
- name: Configure haproxy
template:
@@ -43,10 +38,11 @@
notify: restart haproxy
- name: Enable and start haproxy
- service:
+ systemd:
name: haproxy
state: started
enabled: yes
+ daemon_reload: yes
register: start_result
- set_fact:
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 663ac08b8..c3300a7ef 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -6,7 +6,8 @@ Master service installation
Requirements
------------
-A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+* Ansible 2.2
+* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
Role Variables
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index e119db1a2..69c5a1663 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,16 +1,16 @@
---
- name: restart master
- service: name={{ openshift.common.service_type }}-master state=restarted
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
- name: restart master api
- service: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
notify: Verify API Server
- name: restart master controllers
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- name: Verify API Server
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index a2f665702..7457e4378 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: Master
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 2.1
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 79c62e985..2de5cd3f3 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -64,9 +64,9 @@
args:
creates: "{{ openshift_master_policy }}"
notify:
- - restart master
- - restart master api
- - restart master controllers
+ - restart master
+ - restart master api
+ - restart master controllers
- name: Create the scheduler config
copy:
@@ -74,9 +74,9 @@
dest: "{{ openshift_master_scheduler_conf }}"
backup: true
notify:
- - restart master
- - restart master api
- - restart master controllers
+ - restart master
+ - restart master api
+ - restart master controllers
- name: Install httpd-tools if needed
package: name=httpd-tools state=present
@@ -147,8 +147,8 @@
mode: 0600
when: openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined
notify:
- - restart master
- - restart master api
+ - restart master
+ - restart master api
- set_fact:
translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}"
@@ -163,9 +163,9 @@
group: root
mode: 0600
notify:
- - restart master
- - restart master api
- - restart master controllers
+ - restart master
+ - restart master api
+ - restart master controllers
- include: set_loopback_context.yml
when: openshift.common.version_gte_3_2_or_1_2
@@ -179,7 +179,10 @@
# https://github.com/openshift/origin/issues/6065
# https://github.com/openshift/origin/issues/6447
- name: Start and enable master
- service: name={{ openshift.common.service_type }}-master enabled=yes state=started
+ systemd:
+ name: "{{ openshift.common.service_type }}-master"
+ enabled: yes
+ state: started
when: not openshift_master_ha | bool
register: start_result
until: not start_result | failed
@@ -187,29 +190,30 @@
delay: 60
notify: Verify API Server
-- name: Check for non-HA master service presence
- command: systemctl show {{ openshift.common.service_type }}-master.service
- register: master_svc_show
- changed_when: false
- failed_when: false
-
- name: Stop and disable non-HA master when running HA
- service:
+ systemd:
name: "{{ openshift.common.service_type }}-master"
enabled: no
state: stopped
- when: openshift_master_ha | bool and master_svc_show.rc == 0 and 'LoadState=not-found' not in master_svc_show.stdout
+ when: openshift_master_ha | bool
+ register: task_result
+ failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
- set_fact:
master_service_status_changed: "{{ start_result | changed }}"
when: not openshift_master_ha | bool
- name: Mask master service
- command: systemctl mask {{ openshift.common.service_type }}-master
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and not openshift.common.is_containerized | bool
+ systemd:
+ name: "{{ openshift.common.service_type }}-master"
+ masked: yes
+ when: >
+ openshift_master_ha | bool and
+ openshift.master.cluster_method == 'native' and
+ not openshift.common.is_containerized | bool
- name: Start and enable master api on first master
- service:
+ systemd:
name: "{{ openshift.common.service_type }}-master-api"
enabled: yes
state: started
@@ -228,7 +232,7 @@
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
- name: Start and enable master api all masters
- service:
+ systemd:
name: "{{ openshift.common.service_type }}-master-api"
enabled: yes
state: started
@@ -264,7 +268,7 @@
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool
- name: Start and enable master controller on first master
- service:
+ systemd:
name: "{{ openshift.common.service_type }}-master-controllers"
enabled: yes
state: started
@@ -274,12 +278,13 @@
retries: 1
delay: 60
-- pause:
+- name: Wait for master controller service to start on first master
+ pause:
seconds: 15
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
- name: Start and enable master controller on all masters
- service:
+ systemd:
name: "{{ openshift.common.service_type }}-master-controllers"
enabled: yes
state: started
@@ -300,7 +305,10 @@
register: install_result
- name: Start and enable cluster service
- service: name=pcsd enabled=yes state=started
+ systemd:
+ name: pcsd
+ enabled: yes
+ state: started
when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
and not openshift.common.is_containerized | bool
diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md
index f150981fa..58dd19ac3 100644
--- a/roles/openshift_master_cluster/README.md
+++ b/roles/openshift_master_cluster/README.md
@@ -6,7 +6,7 @@ TODO
Requirements
------------
-TODO
+* Ansible 2.2
Role Variables
--------------
diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml
index 0c8881521..f2a67bc54 100644
--- a/roles/openshift_master_cluster/meta/main.yml
+++ b/roles/openshift_master_cluster/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description:
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.8
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index 30a0a608d..f3c0f3474 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -5,8 +5,10 @@ OpenShift Metrics Installation
Requirements
------------
-It requires subdomain fqdn to be set.
-If persistence is enabled, then it also requires NFS.
+
+* Ansible 2.2
+* It requires subdomain fqdn to be set.
+* If persistence is enabled, then it also requires NFS.
Role Variables
--------------
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index e119db1a2..69c5a1663 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -1,16 +1,16 @@
---
- name: restart master
- service: name={{ openshift.common.service_type }}-master state=restarted
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
- name: restart master api
- service: name={{ openshift.common.service_type }}-master-api state=restarted
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
notify: Verify API Server
- name: restart master controllers
- service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- name: Verify API Server
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml
index 5f8d4f5c5..a89467de5 100644
--- a/roles/openshift_metrics/meta/main.yaml
+++ b/roles/openshift_metrics/meta/main.yaml
@@ -1,3 +1,17 @@
+---
+galaxy_info:
+ author: David Martín
+ description:
+ company:
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
dependencies:
- { role: openshift_examples }
-- { role: openshift_facts }
\ No newline at end of file
+- { role: openshift_facts }
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index cafecd343..d1920c485 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -6,10 +6,10 @@ Node service installation
Requirements
------------
-One or more Master servers.
-
-A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
+* Ansible 2.2
+* One or more Master servers
+* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos
Role Variables
--------------
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 34071964a..ebe584588 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,6 +1,6 @@
---
- name: restart openvswitch
- service: name=openvswitch state=restarted
+ systemd: name=openvswitch state=restarted
when: not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
notify:
- restart openvswitch pause
@@ -10,5 +10,5 @@
when: openshift.common.is_containerized | bool
- name: restart node
- service: name={{ openshift.common.service_type }}-node state=restarted
+ systemd: name={{ openshift.common.service_type }}-node state=restarted
when: not (node_service_status_changed | default(false) | bool)
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 612cc0e20..31d07838d 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -2,35 +2,37 @@
# TODO: allow for overriding default ports where possible
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
- when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
+ when: >
+ (not ansible_selinux or ansible_selinux.status != 'enabled') and
+ deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
- name: Set node facts
openshift_facts:
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- # Reset node labels to an empty dictionary.
- - role: node
- local_facts:
- labels: {}
- - role: node
- local_facts:
- annotations: "{{ openshift_node_annotations | default(none) }}"
- debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
- iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
- kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
- registry_url: "{{ oreg_url | default(none) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
- set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
- node_image: "{{ osn_image | default(None) }}"
- ovs_image: "{{ osn_ovs_image | default(None) }}"
- proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
- local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
- dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
- env_vars: "{{ openshift_node_env_vars | default(None) }}"
+ # Reset node labels to an empty dictionary.
+ - role: node
+ local_facts:
+ labels: {}
+ - role: node
+ local_facts:
+ annotations: "{{ openshift_node_annotations | default(none) }}"
+ debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
+ iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
+ kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
+ labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
+ registry_url: "{{ oreg_url | default(none) }}"
+ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+ storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
+ set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
+ node_image: "{{ osn_image | default(None) }}"
+ ovs_image: "{{ osn_ovs_image | default(None) }}"
+ proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
+ local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
+ dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
+ env_vars: "{{ openshift_node_env_vars | default(None) }}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
@@ -80,7 +82,10 @@
sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes
- name: Start and enable openvswitch docker service
- service: name=openvswitch.service enabled=yes state=started
+ systemd:
+ name: openvswitch.service
+ enabled: yes
+ state: started
when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
register: ovs_start_result
@@ -102,7 +107,7 @@
group: root
mode: 0600
notify:
- - restart node
+ - restart node
- name: Configure AWS Cloud Provider Settings
lineinfile:
@@ -118,7 +123,7 @@
no_log: True
when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
notify:
- - restart node
+ - restart node
- name: Configure Node Environment Variables
lineinfile:
@@ -128,7 +133,7 @@
create: true
with_dict: "{{ openshift.node.env_vars | default({}) }}"
notify:
- - restart node
+ - restart node
- name: NFS storage plugin configuration
include: storage_plugins/nfs.yml
@@ -168,11 +173,17 @@
when: openshift.common.is_containerized | bool
- name: Start and enable node dep
- service: name={{ openshift.common.service_type }}-node-dep enabled=yes state=started
+ systemd:
+ name: "{{ openshift.common.service_type }}-node-dep"
+ enabled: yes
+ state: started
when: openshift.common.is_containerized | bool
- name: Start and enable node
- service: name={{ openshift.common.service_type }}-node enabled=yes state=started
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ enabled: yes
+ state: started
register: node_start_result
until: not node_start_result | failed
retries: 1
diff --git a/roles/openshift_node_certificates/README.md b/roles/openshift_node_certificates/README.md
index f56066b29..f4215950f 100644
--- a/roles/openshift_node_certificates/README.md
+++ b/roles/openshift_node_certificates/README.md
@@ -6,6 +6,8 @@ This role determines if OpenShift node certificates must be created, delegates c
Requirements
------------
+* Ansible 2.2
+
Role Variables
--------------
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index f2299cecf..a74668b13 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -2,9 +2,9 @@
- name: update ca trust
command: update-ca-trust
notify:
- - restart docker after updating ca trust
+ - restart docker after updating ca trust
- name: restart docker after updating ca trust
- service:
+ systemd:
name: docker
state: restarted
diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml
index 50a862ee9..93216c1d2 100644
--- a/roles/openshift_node_certificates/meta/main.yml
+++ b/roles/openshift_node_certificates/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: OpenShift Node Certificates
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 2.1
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml
index 7d43b6106..b4a0c3583 100644
--- a/roles/openshift_node_dnsmasq/handlers/main.yml
+++ b/roles/openshift_node_dnsmasq/handlers/main.yml
@@ -1,10 +1,10 @@
---
- name: restart NetworkManager
- service:
+ systemd:
name: NetworkManager
state: restarted
- name: restart dnsmasq
- service:
+ systemd:
name: dnsmasq
state: restarted
diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml
index c83d64ae4..18e04e06d 100644
--- a/roles/openshift_node_dnsmasq/meta/main.yml
+++ b/roles/openshift_node_dnsmasq/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: OpenShift Node DNSMasq support
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.7
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
index 0167b02b1..3311f7006 100644
--- a/roles/openshift_node_dnsmasq/tasks/main.yml
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -22,16 +22,16 @@
- name: Deploy additional dnsmasq.conf
template:
- src: "{{ openshift_node_dnsmasq_additional_config_file }}"
- dest: /etc/dnsmasq.d/openshift-ansible.conf
- owner: root
- group: root
- mode: 0644
+ src: "{{ openshift_node_dnsmasq_additional_config_file }}"
+ dest: /etc/dnsmasq.d/openshift-ansible.conf
+ owner: root
+ group: root
+ mode: 0644
when: openshift_node_dnsmasq_additional_config_file is defined
notify: restart dnsmasq
- name: Enable dnsmasq
- service:
+ systemd:
name: dnsmasq
enabled: yes
state: started
diff --git a/roles/openshift_storage_nfs/README.md b/roles/openshift_storage_nfs/README.md
index b0480a958..817b007e8 100644
--- a/roles/openshift_storage_nfs/README.md
+++ b/roles/openshift_storage_nfs/README.md
@@ -6,10 +6,10 @@ OpenShift NFS Server Installation
Requirements
------------
-This role is intended to be applied to the [nfs] host group which is
+* Ansible 2.2
+* This role is intended to be applied to the [nfs] host group which is
separate from OpenShift infrastructure components.
-
-Requires access to the 'nfs-utils' package.
+* Requires access to the 'nfs-utils' package.
Role Variables
--------------
diff --git a/roles/openshift_storage_nfs/handlers/main.yml b/roles/openshift_storage_nfs/handlers/main.yml
index a1377a203..0d1149db8 100644
--- a/roles/openshift_storage_nfs/handlers/main.yml
+++ b/roles/openshift_storage_nfs/handlers/main.yml
@@ -1,6 +1,6 @@
---
- name: restart nfs-server
- service:
+ systemd:
name: nfs-server
state: restarted
when: not (nfs_service_status_changed | default(false))
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index 865865d9c..62e38bd8c 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: OpenShift NFS Server
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.9
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index ecc52e4af..fd935f105 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -10,7 +10,7 @@
register: nfs_config
- name: Restart nfs-config
- service: name=nfs-config state=restarted
+ systemd: name=nfs-config state=restarted
when: nfs_config | changed
- name: Ensure exports directory exists
@@ -26,9 +26,9 @@
owner: nfsnobody
group: nfsnobody
with_items:
- - "{{ openshift.hosted.registry }}"
- - "{{ openshift.hosted.metrics }}"
- - "{{ openshift.hosted.logging }}"
+ - "{{ openshift.hosted.registry }}"
+ - "{{ openshift.hosted.metrics }}"
+ - "{{ openshift.hosted.logging }}"
- name: Configure exports
@@ -36,7 +36,7 @@
dest: /etc/exports.d/openshift-ansible.exports
src: exports.j2
notify:
- - restart nfs-server
+ - restart nfs-server
# Now that we're putting our exports in our own file clean up the old ones
- name: register exports
@@ -51,16 +51,14 @@
with_items: "{{ exports_out.stdout_lines | default([]) }}"
when: exports_out.rc == 0
notify:
- - restart nfs-server
+ - restart nfs-server
- name: Enable and start services
- service:
- name: "{{ item }}"
+ systemd:
+ name: nfs-server
state: started
enabled: yes
register: start_result
- with_items:
- - nfs-server
- set_fact:
nfs_service_status_changed: "{{ start_result | changed }}"
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index 3680ef5b5..8b8471745 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -8,10 +8,9 @@ create persistent volumes.
## Requirements
-* NFS server with NFS, iptables, and everything setup.
-
+* Ansible 2.2
+* NFS server with NFS, iptables, and everything setup
* A lvm volume group created on the nfs server (default: openshiftvg)
-
* The lvm volume needs to have as much free space as you are allocating
## Role Variables
diff --git a/roles/openshift_storage_nfs_lvm/handlers/main.yml b/roles/openshift_storage_nfs_lvm/handlers/main.yml
index 52f3ceffe..9ce8b783d 100644
--- a/roles/openshift_storage_nfs_lvm/handlers/main.yml
+++ b/roles/openshift_storage_nfs_lvm/handlers/main.yml
@@ -1,3 +1,3 @@
---
- name: restart nfs
- service: name=nfs-server state=restarted
+ systemd: name=nfs-server state=restarted
diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml
index 62ea54883..bed1216f8 100644
--- a/roles/openshift_storage_nfs_lvm/meta/main.yml
+++ b/roles/openshift_storage_nfs_lvm/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: Create LVM volumes and use them as openshift persistent volumes.
company: Red Hat, Inc.
license: license (Apache)
- min_ansible_version: 1.4
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index e0be9f0b7..03f4fcec0 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -4,14 +4,23 @@
when: not openshift.common.is_containerized | bool
- name: Start rpcbind
- service: name=rpcbind state=started enabled=yes
+ systemd:
+ name: rpcbind
+ state: started
+ enabled: yes
- name: Start nfs
- service: name=nfs-server state=started enabled=yes
+ systemd:
+ name: nfs-server
+ state: started
+ enabled: yes
- name: Export the directories
lineinfile: dest=/etc/exports
regexp="^{{ osnl_mount_dir }}/{{ item }} "
line="{{ osnl_mount_dir }}/{{ item }} {{osnl_nfs_export_options}}"
- with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d
+ with_sequence:
+ start: "{{osnl_volume_num_start}}"
+ count: "{{osnl_number_of_volumes}}"
+ format: "{{osnl_volume_prefix}}{{osnl_volume_size}}g%04d"
notify: restart nfs