-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README.md | 13
-rw-r--r--  README_AEP.md | 4
-rw-r--r--  README_AWS.md | 4
-rw-r--r--  README_vagrant.md | 2
-rwxr-xr-x  bin/cluster | 2
-rw-r--r--  filter_plugins/oo_filters.py | 52
-rw-r--r--  filter_plugins/openshift_master.py | 31
-rw-r--r--  filter_plugins/openshift_node.py | 43
-rw-r--r--  inventory/aws/hosts/hosts | 2
-rw-r--r--  inventory/byo/hosts.aep.example | 59
-rw-r--r--  inventory/byo/hosts.openstack | 2
-rw-r--r--  inventory/byo/hosts.origin.example | 58
-rw-r--r--  inventory/byo/hosts.ose.example | 58
-rw-r--r--  inventory/gce/hosts/hosts | 2
-rw-r--r--  inventory/libvirt/hosts/hosts | 2
-rw-r--r--  inventory/openstack/hosts/hosts | 2
-rw-r--r--  lookup_plugins/oo_option.py | 26
-rw-r--r--  lookup_plugins/sequence.py | 215
-rw-r--r--  openshift-ansible.spec | 165
-rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 2
l---------  playbooks/adhoc/noc/filter_plugins | 1
l---------  playbooks/adhoc/noc/roles | 1
-rw-r--r--  playbooks/adhoc/setupnfs.yml | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/add_nodes.yml | 5
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 8
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 5
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/scaleup.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/service.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 97
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 33
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 5
-rw-r--r--  playbooks/aws/openshift-cluster/vars.defaults.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.int.yml | 15
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.prod.yml | 15
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.stage.yml | 15
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 49
-rw-r--r--  playbooks/byo/openshift_facts.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 23
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 12
-rw-r--r--  playbooks/common/openshift-master/config.yml | 30
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 4
-rw-r--r--  playbooks/common/openshift-node/config.yml | 22
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 4
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/service.yml | 4
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 9
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/service.yml | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 11
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/dns.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 14
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 8
-rw-r--r--  roles/docker/tasks/main.yml | 18
-rw-r--r--  roles/etcd/defaults/main.yaml | 11
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/main.yml | 39
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 12
-rw-r--r--  roles/etcd_ca/tasks/main.yml | 2
-rw-r--r--  roles/etcd_certificates/tasks/client.yml | 14
-rw-r--r--  roles/etcd_certificates/tasks/main.yml | 4
-rw-r--r--  roles/etcd_certificates/tasks/server.yml | 24
-rw-r--r--  roles/etcd_common/defaults/main.yml | 8
-rw-r--r--  roles/etcd_common/tasks/main.yml | 13
-rw-r--r--  roles/etcd_common/templates/host_int_map.j2 | 13
-rw-r--r--  roles/flannel/handlers/main.yml | 4
-rw-r--r--  roles/flannel/tasks/main.yml | 12
-rw-r--r--  roles/flannel_register/README.md | 2
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 2
-rw-r--r--  roles/flannel_register/tasks/main.yml | 6
-rw-r--r--  roles/kube_nfs_volumes/README.md | 2
-rw-r--r--  roles/nuage_master/files/serviceaccount.sh | 63
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 2
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 14
-rw-r--r--  roles/nuage_master/tasks/serviceaccount.yml | 51
-rw-r--r--  roles/nuage_master/templates/nuage-openshift-monitor.j2 | 6
-rw-r--r--  roles/nuage_master/vars/main.yaml | 17
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 4
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 14
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_builddefaults/meta/main.yml | 15
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml | 24
-rw-r--r--  roles/openshift_builddefaults/vars/main.yml | 15
-rw-r--r--  roles/openshift_cloud_provider/templates/openstack.conf.j2 | 7
-rw-r--r--  roles/openshift_cluster_metrics/tasks/main.yml | 1
-rw-r--r--  roles/openshift_common/README.md | 1
-rw-r--r--  roles/openshift_common/tasks/main.yml | 1
-rw-r--r--  roles/openshift_docker/tasks/main.yml | 9
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 3
-rw-r--r--  roles/openshift_etcd/meta/main.yml | 2
-rw-r--r--  roles/openshift_etcd_certificates/meta/main.yml | 16
-rw-r--r--  roles/openshift_etcd_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 5
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml | 438
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml | 28
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json | 5
-rw-r--r--  roles/openshift_expand_partition/README.md | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 212
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 26
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 1
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml | 40
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master/defaults/main.yml | 8
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 45
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 15
-rw-r--r--  roles/openshift_master/templates/htpasswd.j2 | 5
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 15
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 15
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 8
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 11
-rw-r--r--  roles/openshift_master_facts/vars/main.yml | 14
-rw-r--r--  roles/openshift_metrics/README.md | 57
-rw-r--r--  roles/openshift_metrics/meta/main.yaml | 3
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 57
-rw-r--r--  roles/openshift_metrics/vars/main.yaml | 19
-rw-r--r--  roles/openshift_node/meta/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/main.yml | 8
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 6
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 55
-rw-r--r--  roles/openshift_node_dnsmasq/handlers/main.yml | 5
-rw-r--r--  roles/openshift_node_dnsmasq/meta/main.yml | 15
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/main.yml | 27
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/network-manager.yml | 9
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/no-network-manager.yml | 2
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 4
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml | 12
-rw-r--r--  roles/openshift_storage_nfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_storage_nfs_lvm/README.md | 4
-rw-r--r--  roles/os_firewall/defaults/main.yml | 2
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 12
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 6
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 37
-rw-r--r--  utils/src/ooinstall/oo_config.py | 2
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 7
-rw-r--r--  utils/test/fixture.py | 3
172 files changed, 1921 insertions, 1143 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index a22da0715..67ff60a4a 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.81-1 ./
+3.0.87-1 ./
diff --git a/README.md b/README.md
index cf78d04f2..f948319c2 100644
--- a/README.md
+++ b/README.md
@@ -1,19 +1,19 @@
-#OpenShift and Atomic Enterprise Ansible
+[![Join the chat at https://gitter.im/openshift/openshift-ansible](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/openshift/openshift-ansible)
-This repo contains Ansible code for OpenShift and Atomic Enterprise.
+#OpenShift Ansible
+
+This repo contains Ansible code for OpenShift
 ##Setup
 - Install base dependencies:
   - Fedora:
   ```
-  dnf install -y ansible-1.9.4 rubygem-thor rubygem-parseconfig util-linux pyOpenSSL libffi-devel python-cryptography
+  dnf install -y ansible-1.9.4 pyOpenSSL python-cryptography
   ```
   - OSX:
   ```
   # Install ansible 1.9.4 and python 2
   brew install ansible python
-  # Required ruby gems
-  gem install thor parseconfig
   ```
 - Setup for a specific cloud:
   - [AWS](README_AWS.md)
@@ -40,6 +40,3 @@ This repo contains Ansible code for OpenShift and Atomic Enterprise.
 - [Best Practices Guide](docs/best_practices_guide.adoc)
 - [Core Concepts](docs/core_concepts_guide.adoc)
 - [Style Guide](docs/style_guide.adoc)
-
-###Feature Roadmap
-Our Feature Roadmap is available on the OpenShift Origin Infrastructure [Trello board](https://trello.com/b/nbkIrqKa/openshift-origin-infrastructure). All ansible items will be tagged with [installv3].
diff --git a/README_AEP.md b/README_AEP.md
index 739c4baeb..1b926f2ab 100644
--- a/README_AEP.md
+++ b/README_AEP.md
@@ -71,8 +71,8 @@ nodes
# SSH user, this user should allow ssh based auth without requiring a password
ansible_ssh_user=root
-# If ansible_ssh_user is not root, ansible_sudo must be set to true
-#ansible_sudo=true
+# If ansible_ssh_user is not root, ansible_become must be set to true
+#ansible_become=yes
# See DEPLOYMENT_TYPES.md
deployment_type=atomic-enterprise
diff --git a/README_AWS.md b/README_AWS.md
index 978905f68..29689fccb 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -178,9 +178,9 @@ Terminating a cluster
Specifying a deployment type
---------------------------
The --deployment-type flag can be passed to bin/cluster to specify the deployment type
-1. To launch an online cluster (requires access to private repositories and amis):
+1. To launch an OpenShift Enterprise cluster (requires a valid subscription):
```
- bin/cluster create aws --deployment-type=online <cluster-id>
+ bin/cluster create aws --deployment-type=openshift-enterprise <cluster-id>
```
Note: If no deployment type is specified, then the default is origin.
diff --git a/README_vagrant.md b/README_vagrant.md
index a0bb0afbb..bda474f14 100644
--- a/README_vagrant.md
+++ b/README_vagrant.md
@@ -42,7 +42,7 @@ vagrant provision
Environment Variables
---------------------
The following environment variables can be overridden:
-- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online)
+- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, openshift-enterprise)
- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2)
Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role:
diff --git a/bin/cluster b/bin/cluster
index fcab685ef..080bf244a 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -316,7 +316,7 @@ This wrapper is overriding the following ansible variables:
meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
meta_parser.add_argument('-t', '--deployment-type',
- choices=['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise'],
+ choices=['origin', 'atomic-enterprise', 'openshift-enterprise'],
help='Deployment type. (default: origin)')
meta_parser.add_argument('-o', '--option', action='append',
help='options')
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index f6cc2edde..b08670678 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -6,6 +6,7 @@ Custom filters for use in openshift-ansible
 """
 from ansible import errors
+from collections import Mapping
 from operator import itemgetter
 import OpenSSL.crypto
 import os
@@ -128,14 +129,14 @@ class FilterModule(object):
                 returns [1, 3]
         """
-        if not isinstance(data, dict):
-            raise errors.AnsibleFilterError("|failed expects to filter on a dict")
+        if not isinstance(data, Mapping):
+            raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
         if not isinstance(keys, list):
            raise errors.AnsibleFilterError("|failed expects first param is a list")
         # Gather up the values for the list of keys passed in
-        retval = [data[key] for key in keys if data.has_key(key)]
+        retval = [data[key] for key in keys if key in data]
         return retval
@@ -259,8 +260,11 @@
     @staticmethod
     def oo_split(string, separator=','):
-        """ This splits the input string into a list
+        """ This splits the input string into a list. If the input string is
+            already a list we will return it as is.
         """
+        if isinstance(string, list):
+            return string
         return string.split(separator)
     @staticmethod
@@ -296,7 +300,7 @@
             raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
         # Gather up the values for the list of keys passed in
-        return [x for x in data if x.has_key(filter_attr) and x[filter_attr]]
+        return [x for x in data if filter_attr in x and x[filter_attr]]
     @staticmethod
     def oo_oc_nodes_matching_selector(nodes, selector):
@@ -311,6 +315,16 @@
                          "color": "red"}}}]
             selector = 'color=green'
             returns = ['node1.example.com']
+
+            nodes = [{"kind": "Node", "metadata": {"name": "node1.example.com",
+                      "labels": {"kubernetes.io/hostname": "node1.example.com",
+                                 "color": "green"}}},
+                     {"kind": "Node", "metadata": {"name": "node2.example.com",
+                      "labels": {"kubernetes.io/hostname": "node2.example.com",
+                                 "color": "red"}}}]
+            selector = 'color=green,color=red'
+            returns = ['node1.example.com','node2.example.com']
+
         Args:
             nodes (list[dict]): list of node definitions
             selector (str): "label=value" node selector to filter `nodes` by
@@ -323,9 +337,15 @@
             raise errors.AnsibleFilterError("failed expects selector to be a string")
         if not re.match('.*=.*', selector):
             raise errors.AnsibleFilterError("failed selector does not match \"label=value\" format")
-        label = selector.split('=')[0]
-        value = selector.split('=')[1]
-        return FilterModule.oo_oc_nodes_with_label(nodes, label, value)
+        node_lists = []
+        for node_selector in ''.join(selector.split()).split(','):
+            label = node_selector.split('=')[0]
+            value = node_selector.split('=')[1]
+            node_lists.append(FilterModule.oo_oc_nodes_with_label(nodes, label, value))
+        nodes = set(node_lists[0])
+        for node_list in node_lists[1:]:
+            nodes.intersection_update(node_list)
+        return list(nodes)
     @staticmethod
     def oo_oc_nodes_with_label(nodes, label, value):
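
As implemented, a comma-separated selector is treated as an AND of its parts: each `label=value` pair produces a candidate list, and the per-label results are intersected. A standalone sketch under that reading (simplified node records, invented data):

```python
def matching_names(nodes, selector):
    # One set of node names per label=value pair, then intersect them.
    per_label = []
    for node_selector in ''.join(selector.split()).split(','):
        label, value = node_selector.split('=')
        per_label.append(set(n['name'] for n in nodes
                             if n['labels'].get(label) == value))
    names = per_label[0]
    for other in per_label[1:]:
        names &= other
    return sorted(names)

nodes = [{'name': 'node1', 'labels': {'region': 'infra', 'zone': 'west'}},
         {'name': 'node2', 'labels': {'region': 'infra', 'zone': 'east'}}]
print(matching_names(nodes, 'region=infra'))             # ['node1', 'node2']
print(matching_names(nodes, 'region=infra, zone=west'))  # ['node1']
```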
@@ -634,7 +654,9 @@
     @staticmethod
     def oo_openshift_env(hostvars):
-        ''' Return facts which begin with "openshift_"
+        ''' Return facts which begin with "openshift_" and translate
+            legacy facts to their openshift_env counterparts.
+
             Ex: hostvars = {'openshift_fact': 42,
                             'theyre_taking_the_hobbits_to': 'isengard'}
                 returns = {'openshift_fact': 42}
@@ -647,6 +669,11 @@
         for key in hostvars:
             if regex.match(key):
                 facts[key] = hostvars[key]
+
+        migrations = {'openshift_router_selector': 'openshift_hosted_router_selector'}
+        for old_fact, new_fact in migrations.iteritems():
+            if old_fact in facts and new_fact not in facts:
+                facts[new_fact] = facts[old_fact]
         return facts
     @staticmethod
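
The migration addition above is a one-way copy of renamed facts; a compact illustration of the behavior (sample values invented):

```python
facts = {'openshift_router_selector': 'region=infra'}
migrations = {'openshift_router_selector': 'openshift_hosted_router_selector'}
for old_fact, new_fact in migrations.iteritems():
    # Copy the legacy fact to its new name unless the new name is already set.
    if old_fact in facts and new_fact not in facts:
        facts[new_fact] = facts[old_fact]
print(facts['openshift_hosted_router_selector'])  # region=infra
```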
@@ -794,15 +821,18 @@
     def oo_image_tag_to_rpm_version(version, include_dash=False):
         """ Convert an image tag string to an RPM version if necessary
             Empty strings and strings that are already in rpm version format
-            are ignored.
+            are ignored. Also remove non semantic version components.
             Ex. v3.2.0.10 -> -3.2.0.10
+                v1.2.0-rc1 -> -1.2.0
         """
         if not isinstance(version, basestring):
             raise errors.AnsibleFilterError("|failed expects a string or unicode")
-
+        # TODO: Do we need to make this actually convert v1.2.0-rc1 into 1.2.0-0.rc1
+        # We'd need to be really strict about how we build the RPM Version+Release
         if version.startswith("v"):
             version = version.replace("v", "")
+            version = version.split('-')[0]
         if include_dash:
             version = "-" + version
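
The net effect of the trimming change, as a standalone sketch that mirrors the filter's logic without importing the plugin:

```python
def image_tag_to_rpm_version(version, include_dash=False):
    # Drop the leading "v" and any pre-release suffix such as "-rc1",
    # mirroring the filter above.
    if version.startswith("v"):
        version = version.replace("v", "")
        version = version.split('-')[0]
    if include_dash:
        version = "-" + version
    return version

print(image_tag_to_rpm_version("v3.2.0.10", include_dash=True))  # -3.2.0.10
print(image_tag_to_rpm_version("v1.2.0-rc1"))                    # 1.2.0
```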
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 34d9aef75..bb2f5ba7a 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -9,8 +9,14 @@ import sys
 import yaml
 from ansible import errors
-from ansible.runner.filter_plugins.core import bool as ansible_bool
+# pylint: disable=no-name-in-module,import-error
+try:
+    # ansible-2.0
+    from ansible.plugins.filter.core import bool as ansible_bool
+except ImportError:
+    # ansible-1.9.x
+    from ansible.runner.filter_plugins.core import bool as ansible_bool
 class IdentityProviderBase(object):
     """ IdentityProviderBase
@@ -527,9 +533,30 @@
                      'openshift-master.kubeconfig']
         return certs
+    @staticmethod
+    def oo_htpasswd_users_from_file(file_contents):
+        ''' return a dictionary of htpasswd users from htpasswd file contents '''
+        htpasswd_entries = {}
+        if not isinstance(file_contents, basestring):
+            raise errors.AnsibleFilterError("failed, expects to filter on a string")
+        for line in file_contents.splitlines():
+            user = None
+            passwd = None
+            if len(line) == 0:
+                continue
+            if ':' in line:
+                user, passwd = line.split(':', 1)
+
+            if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:
+                error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
+                raise errors.AnsibleFilterError(error_msg)
+            htpasswd_entries[user] = passwd
+        return htpasswd_entries
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {"translate_idps": self.translate_idps,
                 "validate_pcs_cluster": self.validate_pcs_cluster,
-                "certificates_to_synchronize": self.certificates_to_synchronize}
+                "certificates_to_synchronize": self.certificates_to_synchronize,
+                "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/filter_plugins/openshift_node.py b/filter_plugins/openshift_node.py
new file mode 100644
index 000000000..4ef92ba03
--- /dev/null
+++ b/filter_plugins/openshift_node.py
@@ -0,0 +1,43 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-node
+'''
+from ansible import errors
+
+class FilterModule(object):
+    ''' Custom ansible filters for use by openshift_node role'''
+
+    @staticmethod
+    def get_dns_ip(openshift_dns_ip, hostvars):
+        ''' Navigates the complicated logic of when to set dnsIP
+
+            In all situations if they've set openshift_dns_ip use that
+            For 1.0/3.0 installs we use the openshift_master_cluster_vip, openshift_node_first_master_ip, else None
+            For 1.1/3.1 installs we use openshift_master_cluster_vip, else None (product will use kube svc ip)
+            For 1.2/3.2+ installs we set to the node's default interface ip
+        '''
+
+        if not issubclass(type(hostvars), dict):
+            raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+
+        # We always use what they've specified if they've specified a value
+        if openshift_dns_ip != None:
+            return openshift_dns_ip
+
+        if bool(hostvars['openshift']['common']['version_gte_3_2_or_1_2']):
+            return hostvars['ansible_default_ipv4']['address']
+        elif bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):
+            if 'openshift_master_cluster_vip' in hostvars:
+                return hostvars['openshift_master_cluster_vip']
+        else:
+            if 'openshift_master_cluster_vip' in hostvars:
+                return hostvars['openshift_master_cluster_vip']
+            elif 'openshift_node_first_master_ip' in hostvars:
+                return hostvars['openshift_node_first_master_ip']
+        return None
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {'get_dns_ip': self.get_dns_ip}
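
A condensed, runnable restatement of that precedence, with sample hostvars invented for illustration (the guard and edge cases of the real filter are omitted):

```python
def get_dns_ip(openshift_dns_ip, hostvars):
    # Condensed copy of the filter's decision order, for illustration only.
    if openshift_dns_ip is not None:
        return openshift_dns_ip
    common = hostvars['openshift']['common']
    if common['version_gte_3_2_or_1_2']:
        return hostvars['ansible_default_ipv4']['address']
    if common['version_gte_3_1_or_1_1']:
        return hostvars.get('openshift_master_cluster_vip')
    return (hostvars.get('openshift_master_cluster_vip') or
            hostvars.get('openshift_node_first_master_ip'))

hostvars = {'openshift': {'common': {'version_gte_3_2_or_1_2': True,
                                     'version_gte_3_1_or_1_1': True}},
            'ansible_default_ipv4': {'address': '192.168.1.10'}}
print(get_dns_ip(None, hostvars))        # 192.168.1.10 (node's default interface)
print(get_dns_ip('10.0.0.2', hostvars))  # 10.0.0.2 (explicit value always wins)
```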
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
index bf4e0845a..3996e577e 100644
--- a/inventory/aws/hosts/hosts
+++ b/inventory/aws/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
+localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example
index 62891e6c3..c18a423bf 100644
--- a/inventory/byo/hosts.aep.example
+++ b/inventory/byo/hosts.aep.example
@@ -14,9 +14,9 @@ lb
# ssh agent.
ansible_ssh_user=root
-# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
-#ansible_sudo=true
+#ansible_become=yes
# Debug level for all Atomic Enterprise components (Defaults to 2)
debug_level=2
@@ -75,7 +75,7 @@ deployment_type=atomic-enterprise
#openshift_docker_options="-l warn --ipv6=false"
# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead
#openshift_docker_log_driver=json
-#openshift_docker_log_options="tag=mailer"
+#openshift_docker_log_options=["tag=mailer"]
# Alternate image format string. If you're not modifying the format string and
# only need to inject your own registry you may want to consider
@@ -87,12 +87,26 @@ deployment_type=atomic-enterprise
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+# Configuring the ldap ca certificate
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# Available variables for configuring certificates for other identity providers:
+#openshift_master_openid_ca
+#openshift_master_openid_ca_file
+#openshift_master_request_header_ca
+#openshift_master_request_header_ca_file
# Cloud Provider Configuration
#
@@ -113,7 +127,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
#openshift_cloudprovider_openstack_username=username
#openshift_cloudprovider_openstack_password=password
-#openshift_cloudprovider_openstack_tenand_id=tenant_id
+#openshift_cloudprovider_openstack_tenant_id=tenant_id
#openshift_cloudprovider_openstack_tenant_name=tenant_name
#openshift_cloudprovider_openstack_region=region
#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
@@ -248,7 +262,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# will add the newly provided certificates to the cached set of certificates.
# If you would like openshift_master_named_certificates to be overwritten with
# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates: true
+#openshift_master_overwrite_named_certificates=true
#
# Provide local certificate paths which will be deployed to masters
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
@@ -336,6 +350,41 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
+# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
+# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
+# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
+# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq=False
+
+# Global Proxy Configuration
+# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
+# variables for docker and master services.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above.
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# environment variables into Builds. These values will default to their
+# corresponding values above but you may set them independently. See BuildDefaults
+# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=openshift_http_proxy
+#openshift_builddefaults_https_proxy=openshift_https_proxy
+#openshift_builddefaults_no_proxy=openshift_no_proxy
+#openshift_builddefaults_git_http_proxy=openshift_builddefaults_http_proxy
+#openshift_builddefaults_git_https_proxy=openshift_builddefaults_https_proxy
+# Or you may optionally define your own serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
+
+# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
+#openshift_master_dynamic_provisioning_enabled=False
+
+
# host group for masters
[masters]
aep3-master[1:3]-ansible.test.example.com
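
The `openshift_master_htpasswd_users` values in the inventory above are pre-hashed. One way to generate such hashes, assuming the third-party passlib library is available (it is not installed by this repo; `htpasswd -nb user password` from httpd-tools works as well):

```python
# pip install passlib
from passlib.apache import HtpasswdFile

ht = HtpasswdFile(new=True)
ht.set_password("user1", "s3cret")
ht.set_password("user2", "an0ther-s3cret")
# Prints "user:<hash>" lines; paste the hashes into
# openshift_master_htpasswd_users or save them as an htpasswd file.
print(ht.to_string())
```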
diff --git a/inventory/byo/hosts.openstack b/inventory/byo/hosts.openstack
index 05df75c2f..ea7e905cb 100644
--- a/inventory/byo/hosts.openstack
+++ b/inventory/byo/hosts.openstack
@@ -10,7 +10,7 @@ lb
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
ansible_ssh_user=cloud-user
-ansible_sudo=true
+ansible_become=yes
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 1f13aade6..28298d940 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -15,9 +15,9 @@ nfs
# ssh agent.
ansible_ssh_user=root
-# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
-#ansible_sudo=true
+#ansible_become=yes
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
@@ -76,7 +76,7 @@ deployment_type=origin
#openshift_docker_options="-l warn --ipv6=false"
# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead
#openshift_docker_log_driver=json
-#openshift_docker_log_options="tag=mailer"
+#openshift_docker_log_options=["tag=mailer"]
# Alternate image format string. If you're not modifying the format string and
# only need to inject your own registry you may want to consider
@@ -92,12 +92,26 @@ deployment_type=origin
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+# Configuring the ldap ca certificate
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# Available variables for configuring certificates for other identity providers:
+#openshift_master_openid_ca
+#openshift_master_openid_ca_file
+#openshift_master_request_header_ca
+#openshift_master_request_header_ca_file
# Cloud Provider Configuration
#
@@ -118,7 +132,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
#openshift_cloudprovider_openstack_username=username
#openshift_cloudprovider_openstack_password=password
-#openshift_cloudprovider_openstack_tenand_id=tenant_id
+#openshift_cloudprovider_openstack_tenant_id=tenant_id
#openshift_cloudprovider_openstack_tenant_name=tenant_name
#openshift_cloudprovider_openstack_region=region
#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
@@ -253,7 +267,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# will add the newly provided certificates to the cached set of certificates.
# If you would like openshift_master_named_certificates to be overwritten with
# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates: true
+#openshift_master_overwrite_named_certificates=true
#
# Provide local certificate paths which will be deployed to masters
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
@@ -341,6 +355,40 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
+# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
+# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
+# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
+# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq=False
+
+# Global Proxy Configuration
+# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
+# variables for docker and master services.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above.
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# environment variables into Builds. These values will default to their
+# corresponding values above but you may set them independently. See BuildDefaults
+# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=openshift_http_proxy
+#openshift_builddefaults_https_proxy=openshift_https_proxy
+#openshift_builddefaults_no_proxy=openshift_no_proxy
+#openshift_builddefaults_git_http_proxy=openshift_builddefaults_http_proxy
+#openshift_builddefaults_git_https_proxy=openshift_builddefaults_https_proxy
+# Or you may optionally define your own serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
+
+# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
+#openshift_master_dynamic_provisioning_enabled=False
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 2386eb236..38adfe572 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -14,9 +14,9 @@ lb
# ssh agent.
ansible_ssh_user=root
-# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
-#ansible_sudo=true
+#ansible_become=yes
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
@@ -75,7 +75,7 @@ deployment_type=openshift-enterprise
#openshift_docker_options="-l warn --ipv6=false"
# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead
#openshift_docker_log_driver=json
-#openshift_docker_log_options="tag=mailer"
+#openshift_docker_log_options=["tag=mailer"]
# Alternate image format string. If you're not modifying the format string and
@@ -88,12 +88,26 @@ deployment_type=openshift-enterprise
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+# Configuring the ldap ca certificate
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# Available variables for configuring certificates for other identity providers:
+#openshift_master_openid_ca
+#openshift_master_openid_ca_file
+#openshift_master_request_header_ca
+#openshift_master_request_header_ca_file
# Cloud Provider Configuration
#
@@ -114,7 +128,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
#openshift_cloudprovider_openstack_username=username
#openshift_cloudprovider_openstack_password=password
-#openshift_cloudprovider_openstack_tenand_id=tenant_id
+#openshift_cloudprovider_openstack_tenant_id=tenant_id
#openshift_cloudprovider_openstack_tenant_name=tenant_name
#openshift_cloudprovider_openstack_region=region
#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
@@ -249,7 +263,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# will add the newly provided certificates to the cached set of certificates.
# If you would like openshift_master_named_certificates to be overwritten with
# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates: true
+#openshift_master_overwrite_named_certificates=true
#
# Provide local certificate paths which will be deployed to masters
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
@@ -337,6 +351,40 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
+# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
+# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
+# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
+# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq=False
+
+# Global Proxy Configuration
+# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
+# variables for docker and master services.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above.
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# environment variables into Builds. These values will default to their
+# corresponding values above but you may set them independently. See BuildDefaults
+# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=openshift_http_proxy
+#openshift_builddefaults_https_proxy=openshift_https_proxy
+#openshift_builddefaults_no_proxy=openshift_no_proxy
+#openshift_builddefaults_git_http_proxy=openshift_builddefaults_http_proxy
+#openshift_builddefaults_git_https_proxy=openshift_builddefaults_https_proxy
+# Or you may optionally define your own serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
+
+# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
+#openshift_master_dynamic_provisioning_enabled=False
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
index bf4e0845a..3996e577e 100644
--- a/inventory/gce/hosts/hosts
+++ b/inventory/gce/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
+localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
index bf4e0845a..3996e577e 100644
--- a/inventory/libvirt/hosts/hosts
+++ b/inventory/libvirt/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
+localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/openstack/hosts/hosts b/inventory/openstack/hosts/hosts
index 2d2194a4d..9b63e98f4 100644
--- a/inventory/openstack/hosts/hosts
+++ b/inventory/openstack/hosts/hosts
@@ -1 +1 @@
-localhost ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2' connection=local
+localhost ansible_become=no ansible_python_interpreter='/usr/bin/env python2' connection=local
diff --git a/lookup_plugins/oo_option.py b/lookup_plugins/oo_option.py
index 35dce48f9..3fc46ab9b 100644
--- a/lookup_plugins/oo_option.py
+++ b/lookup_plugins/oo_option.py
@@ -17,14 +17,36 @@ This returns, by order of priority:
 * if none of the above conditions are met, empty string is returned
 '''
-from ansible.utils import template
+
 import os
+# pylint: disable=no-name-in-module,import-error,unused-argument,unused-variable,super-init-not-called,too-few-public-methods,missing-docstring
+try:
+    # ansible-2.0
+    from ansible.plugins.lookup import LookupBase
+except ImportError:
+    # ansible-1.9.x
+    class LookupBase(object):
+        def __init__(self, basedir=None, runner=None, **kwargs):
+            self.runner = runner
+            self.basedir = self.runner.basedir
+        def get_basedir(self, variables):
+            return self.basedir
+
+# pylint: disable=no-name-in-module,import-error
+try:
+    # ansible-2.0
+    from ansible import template
+except ImportError:
+    # ansible 1.9.x
+    from ansible.utils import template
+
+
 # Reason: disable too-few-public-methods because the `run` method is the only
 # one required by the Ansible API
 # Status: permanently disabled
 # pylint: disable=too-few-public-methods
-class LookupModule(object):
+class LookupModule(LookupBase):
     ''' oo_option lookup plugin main class '''
     # Reason: disable unused-argument because Ansible is calling us with many
diff --git a/lookup_plugins/sequence.py b/lookup_plugins/sequence.py
deleted file mode 100644
index 8ca9e7b39..000000000
--- a/lookup_plugins/sequence.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.errors import AnsibleError
-import ansible.utils as utils
-from re import compile as re_compile, IGNORECASE
-
-# shortcut format
-NUM = "(0?x?[0-9a-f]+)"
-SHORTCUT = re_compile(
-    "^(" +        # Group 0
-    NUM +         # Group 1: Start
-    "-)?" +
-    NUM +         # Group 2: End
-    "(/" +        # Group 3
-    NUM +         # Group 4: Stride
-    ")?" +
-    "(:(.+))?$",  # Group 5, Group 6: Format String
-    IGNORECASE
-)
-
-
-class LookupModule(object):
-    """
-    sequence lookup module
-
-    Used to generate some sequence of items. Takes arguments in two forms.
-
-    The simple / shortcut form is:
-
-      [start-]end[/stride][:format]
-
-    As indicated by the brackets: start, stride, and format string are all
-    optional. The format string is in the style of printf. This can be used
-    to pad with zeros, format in hexadecimal, etc. All of the numerical values
-    can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
-    Negative numbers are not supported.
-
-    Some examples:
-
-      5 -> ["1","2","3","4","5"]
-      5-8 -> ["5", "6", "7", "8"]
-      2-10/2 -> ["2", "4", "6", "8", "10"]
-      4:host%02d -> ["host01","host02","host03","host04"]
-
-    The standard Ansible key-value form is accepted as well. For example:
-
-      start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]
-
-    This format takes an alternate form of "end" called "count", which counts
-    some number from the starting value. For example:
-
-      count=5 -> ["1", "2", "3", "4", "5"]
-      start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
-      start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
-      start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
-
-    The count option is mostly useful for avoiding off-by-one errors and errors
-    calculating the number of entries in a sequence when a stride is specified.
-    """
-
-    def __init__(self, basedir, **kwargs):
-        """absorb any keyword args"""
-        self.basedir = basedir
-
-    def reset(self):
-        """set sensible defaults"""
-        self.start = 1
-        self.count = None
-        self.end = None
-        self.stride = 1
-        self.format = "%d"
-
-    def parse_kv_args(self, args):
-        """parse key-value style arguments"""
-        for arg in ["start", "end", "count", "stride"]:
-            try:
-                arg_raw = args.pop(arg, None)
-                if arg_raw is None:
-                    continue
-                arg_cooked = int(arg_raw, 0)
-                setattr(self, arg, arg_cooked)
-            except ValueError:
-                raise AnsibleError(
-                    "can't parse arg %s=%r as integer"
-                    % (arg, arg_raw)
-                )
-        if 'format' in args:
-            self.format = args.pop("format")
-        if args:
-            raise AnsibleError(
-                "unrecognized arguments to with_sequence: %r"
-                % args.keys()
-            )
-
-    def parse_simple_args(self, term):
-        """parse the shortcut forms, return True/False"""
-        match = SHORTCUT.match(term)
-        if not match:
-            return False
-
-        _, start, end, _, stride, _, format = match.groups()
-
-        if start is not None:
-            try:
-                start = int(start, 0)
-            except ValueError:
-                raise AnsibleError("can't parse start=%s as integer" % start)
-        if end is not None:
-            try:
-                end = int(end, 0)
-            except ValueError:
-                raise AnsibleError("can't parse end=%s as integer" % end)
-        if stride is not None:
-            try:
-                stride = int(stride, 0)
-            except ValueError:
-                raise AnsibleError("can't parse stride=%s as integer" % stride)
-
-        if start is not None:
-            self.start = start
-        if end is not None:
-            self.end = end
-        if stride is not None:
-            self.stride = stride
-        if format is not None:
-            self.format = format
-
-    def sanity_check(self):
-        if self.count is None and self.end is None:
-            raise AnsibleError(
-                "must specify count or end in with_sequence"
-            )
-        elif self.count is not None and self.end is not None:
-            raise AnsibleError(
-                "can't specify both count and end in with_sequence"
-            )
-        elif self.count is not None:
-            # convert count to end
-            if self.count != 0:
-                self.end = self.start + self.count * self.stride - 1
-            else:
-                self.start = 0
-                self.end = 0
-                self.stride = 0
-            del self.count
-        if self.stride > 0 and self.end < self.start:
-            raise AnsibleError("to count backwards make stride negative")
-        if self.stride < 0 and self.end > self.start:
-            raise AnsibleError("to count forward don't make stride negative")
-        if self.format.count('%') != 1:
-            raise AnsibleError("bad formatting string: %s" % self.format)
-
-    def generate_sequence(self):
-        if self.stride > 0:
-            adjust = 1
-        else:
-            adjust = -1
-        numbers = xrange(self.start, self.end + adjust, self.stride)
-
-        for i in numbers:
-            try:
-                formatted = self.format % i
-                yield formatted
-            except (ValueError, TypeError):
-                raise AnsibleError(
-                    "problem formatting %r with %r" % self.format
-                )
-
-    def run(self, terms, inject=None, **kwargs):
-        results = []
-
-        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
-        if isinstance(terms, basestring):
-            terms = [ terms ]
-
-        for term in terms:
-            try:
-                self.reset()  # clear out things for this iteration
-
-                try:
-                    if not self.parse_simple_args(term):
-                        self.parse_kv_args(utils.parse_kv(term))
-                except Exception:
-                    raise AnsibleError(
-                        "unknown error parsing with_sequence arguments: %r"
-                        % term
-                    )
-
-                self.sanity_check()
-                if self.stride != 0:
-                    results.extend(self.generate_sequence())
-            except AnsibleError:
-                raise
-            except Exception, e:
-                raise AnsibleError(
-                    "unknown error generating sequence: %s" % str(e)
-                )
-
-        return results
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 98d85ddd9..57cc726b5 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.81
+Version: 3.0.87
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -15,6 +15,7 @@ BuildArch: noarch
Requires: ansible >= 1.9.4
Requires: python2
+Requires: openshift-ansible-docs = %{version}-%{release}
%description
Openshift and Atomic Enterprise Ansible
@@ -43,30 +44,14 @@ mkdir -p %{buildroot}%{_bindir}
mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
mkdir -p %{buildroot}/etc/bash_completion.d
mkdir -p %{buildroot}/etc/openshift_ansible
-cp -p bin/{ossh,oscp,opssh,opscp,ohi} %{buildroot}%{_bindir}
-cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
-cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
-cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
# Fix links
-rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
-ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
-ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
-ln -sf %{_datadir}/ansible/inventory/gce %{buildroot}%{python_sitelib}/openshift_ansible/gce
# openshift-ansible-docs install
-# -docs are currently just %doc, no install needed
-
-# openshift-ansible-inventory install
-mkdir -p %{buildroot}/etc/ansible
-mkdir -p %{buildroot}%{_datadir}/ansible/inventory
-mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
-mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
-cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory
-cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml
-cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
-cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
+# Install example inventory into docs/examples
+mkdir -p docs/example-inventories
+cp inventory/byo/* docs/example-inventories/
# openshift-ansible-playbooks install
cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
@@ -95,28 +80,6 @@ popd
%dir %{_datadir}/ansible/%{name}
# ----------------------------------------------------------------------------------
-# openshift-ansible-bin subpackage
-# ----------------------------------------------------------------------------------
-%package bin
-Summary: Openshift and Atomic Enterprise Ansible Scripts for working with metadata hosts
-Requires: %{name} = %{version}
-Requires: %{name}-inventory = %{version}
-Requires: %{name}-playbooks = %{version}
-BuildRequires: python2-devel
-BuildArch: noarch
-
-%description bin
-Scripts to make it nicer when working with hosts that are defined only by metadata.
-
-%files bin
-%{_bindir}/*
-%exclude %{_bindir}/atomic-openshift-installer
-%{python_sitelib}/openshift_ansible/
-/etc/bash_completion.d/*
-%config(noreplace) /etc/openshift_ansible/
-
-
-# ----------------------------------------------------------------------------------
# openshift-ansible-docs subpackage
# ----------------------------------------------------------------------------------
%package docs
@@ -131,47 +94,6 @@ BuildArch: noarch
%doc docs
# ----------------------------------------------------------------------------------
-# openshift-ansible-inventory subpackage
-# ----------------------------------------------------------------------------------
-%package inventory
-Summary: Openshift and Atomic Enterprise Ansible Inventories
-Requires: %{name} = %{version}
-BuildArch: noarch
-
-%description inventory
-Ansible Inventories used with the openshift-ansible scripts and playbooks.
-
-%files inventory
-%config(noreplace) /etc/ansible/*
-%dir %{_datadir}/ansible/inventory
-%{_datadir}/ansible/inventory/multi_inventory.py*
-
-%package inventory-aws
-Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS
-Requires: %{name}-inventory = %{version}
-Requires: python-boto
-BuildArch: noarch
-
-%description inventory-aws
-Ansible Inventories for AWS used with the openshift-ansible scripts and playbooks.
-
-%files inventory-aws
-%{_datadir}/ansible/inventory/aws/ec2.py*
-
-%package inventory-gce
-Summary: Openshift and Atomic Enterprise Ansible Inventories for GCE
-Requires: %{name}-inventory = %{version}
-Requires: python-libcloud >= 0.13
-BuildArch: noarch
-
-%description inventory-gce
-Ansible Inventories for GCE used with the openshift-ansible scripts and playbooks.
-
-%files inventory-gce
-%{_datadir}/ansible/inventory/gce/gce.py*
-
-
-# ----------------------------------------------------------------------------------
# openshift-ansible-playbooks subpackage
# ----------------------------------------------------------------------------------
%package playbooks
@@ -261,6 +183,83 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Apr 27 2016 Troy Dawson <tdawson@redhat.com> 3.0.87-1
+- a-o-i-: Allow empty proxy (smunilla@redhat.com)
+- a-o-i: Populate groups for openshift_facts (smunilla@redhat.com)
+- Replace sudo with become when accessing deployment_vars.
+ (abutcher@redhat.com)
+- Port lookup plugins to ansible v2. (abutcher@redhat.com)
+- Add masterConfig.volumeConfig.dynamicProvisioningEnabled (sdodson@redhat.com)
+
+* Tue Apr 26 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.86-1
+- Don't set empty HTTP_PROXY, HTTPS_PROXY, NO_PROXY values (sdodson@redhat.com)
+- a-o-i tests: Update attended tests for proxy (smunilla@redhat.com)
+- Move portal_net from openshift_common to openshift_facts.
+ (abutcher@redhat.com)
+- Apply openshift_common to all masters prior to creating certificates for
+ portal_net. (abutcher@redhat.com)
+- Access portal_net in common facts. (abutcher@redhat.com)
+- Add support for setting identity provider custom values (jdetiber@redhat.com)
+- port filter_plugins to ansible2 (tob@butter.sh)
+- a-o-i: Update prompt when asking for proxy (smunilla@redhat.com)
+- a-o-i: UI additions for proxies (smunilla@redhat.com)
+
+* Mon Apr 25 2016 Troy Dawson <tdawson@redhat.com> 3.0.85-1
+- Fix backward compat for osm_default_subdomain (jdetiber@redhat.com)
+- Replace deprecated sudo with become. (abutcher@redhat.com)
+- Fix image version handling for v1.2.0-rc1 (sdodson@redhat.com)
+- Pod must be recreated for the upgrade (bleanhar@redhat.com)
+- openshift_etcd_facts should rely on openshift_facts not openshift_common
+ (jdetiber@redhat.com)
+- Sort and de-dupe no_proxy list (sdodson@redhat.com)
+- openshift-metrics: adding duration and resolution options
+ (efreiber@redhat.com)
+- Changed service account creation to ansible (vishal.patil@nuagenetworks.net)
+- As per https://github.com/openshift/openshift-
+ ansible/issues/1795#issuecomment-213873564, renamed openshift_node_dnsmasq to
+ openshift_use_dnsmasq where applicable. Fixes 1795 (donovan@switchbit.io)
+- Add global proxy configuration (sdodson@redhat.com)
+- remove duplicate register: (tob@butter.sh)
+
+* Fri Apr 22 2016 Troy Dawson <tdawson@redhat.com> 3.0.84-1
+- Fix for docker not present (jdetiber@redhat.com)
+- Reconcile roles in additive-only mode on upgrade (jliggitt@redhat.com)
+- Set etcd_hostname and etcd_ip for masters w/ external etcd.
+ (abutcher@redhat.com)
+
+* Thu Apr 21 2016 Troy Dawson <tdawson@redhat.com> 3.0.83-1
+- a-o-i: Correct bug with default storage host (smunilla@redhat.com)
+- Only add new sccs (bleanhar@redhat.com)
+- Fix bug after portal_net move from master to common role.
+ (dgoodwin@redhat.com)
+- Sync latest content (sdodson@redhat.com)
+- Use xpaas 1.3.0-1, use enterprise content for metrics (sdodson@redhat.com)
+- Support configurable admin user and password for the enterprise. Prefix
+  changes for admin and password with nuage_master (abhat@nuagenetworks.net)
+
+* Wed Apr 20 2016 Troy Dawson <tdawson@redhat.com> 3.0.82-1
+- Use a JSON list for docker log options. (dgoodwin@redhat.com)
+- Fix legacy cli_docker_* vars not migrating. (dgoodwin@redhat.com)
+- Fix use of older image tag version during upgrade. (dgoodwin@redhat.com)
+- Remove etcd_interface variable. Remove openshift_docker dependency from the
+ etcd role. (abutcher@redhat.com)
+- Use openshift_hostname/openshift_ip values for etcd configuration and
+ certificates. (abutcher@redhat.com)
+- added new openshift-metrics service (j.david.nieto@gmail.com)
+- Translate legacy facts within the oo_openshift_env filter.
+ (abutcher@redhat.com)
+- Remove empty facts from nested dictionaries. (abutcher@redhat.com)
+- Fix router selector fact migration and match multiple selectors when counting
+ nodes. (abutcher@redhat.com)
+- Fixing the spec for PR 1734 (bleanhar@redhat.com)
+- Add openshift_use_dnsmasq (sdodson@redhat.com)
+- Promote portal_net to openshift.common, add kube_svc_ip (sdodson@redhat.com)
+- Add example inventories to docs, install docs by default (sdodson@redhat.com)
+- Fix use of JSON inventory vars with raw booleans. (dgoodwin@redhat.com)
+- cleanup roles after roles move to openshift-tools (jdiaz@redhat.com)
+- Reference Setup for Origin and Ose from up-to-date docs.openshift.[com|org]
+ instead of local README_[origin|OSE].md (jchaloup@redhat.com)
+
* Mon Apr 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.81-1
- IMAGE_PREFIX=openshift3/ for enterprise logging/metrics (sdodson@redhat.com)
- a-o-i: Don't assume storage on 1st master (smunilla@redhat.com)
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index c14d08e87..5a5a00ea4 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -8,7 +8,7 @@
- hosts:
- OSEv3:children
- sudo: yes
+ become: yes
tasks:
- shell: docker ps -a -q | xargs docker stop
diff --git a/playbooks/adhoc/noc/filter_plugins b/playbooks/adhoc/noc/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/adhoc/noc/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/adhoc/noc/roles b/playbooks/adhoc/noc/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/adhoc/noc/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles
\ No newline at end of file
diff --git a/playbooks/adhoc/setupnfs.yml b/playbooks/adhoc/setupnfs.yml
index 5f3631fcf..fd489dc70 100644
--- a/playbooks/adhoc/setupnfs.yml
+++ b/playbooks/adhoc/setupnfs.yml
@@ -1,7 +1,7 @@
---
### This playbook is old and we are currently not using NFS.
- hosts: tag_Name_nfs-v3-stg
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 8fb515982..0755d8bc5 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -10,7 +10,7 @@
- hosts:
- OSEv3:children
- sudo: yes
+ become: yes
tasks:
- name: Detecting Operating System
@@ -232,6 +232,9 @@
- /usr/local/bin/oadm
- /usr/local/bin/oc
- /usr/local/bin/kubectl
+ - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
+ - /etc/dnsmasq.d/origin-dns.conf
+ - /etc/dnsmasq.d/origin-upstream-dns.conf
  # Since we are potentially removing the systemd unit files for separated
  # master-api and master-controllers services, we need to reload the
@@ -240,7 +243,9 @@
command: systemctl daemon-reload
- hosts: nodes
- sudo: yes
+ become: yes
tasks:
- name: restart docker
service: name=docker state=restarted
+ - name: restart NetworkManager
+ service: name=NetworkManager state=restarted
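
For reference, a minimal standalone sketch of the cleanup this hunk adds (file
paths are taken from the hunk above; the nodes host group and become setting
are assumptions for illustration):

    ---
    - hosts: nodes
      become: yes
      tasks:
      # Remove the dnsmasq configuration laid down when openshift_use_dnsmasq
      # is enabled
      - file:
          path: "{{ item }}"
          state: absent
        with_items:
        - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
        - /etc/dnsmasq.d/origin-dns.conf
        - /etc/dnsmasq.d/origin-upstream-dns.conf
      # NetworkManager regenerates resolv.conf once the dispatcher hook is gone
      - name: restart NetworkManager
        service: name=NetworkManager state=restarted
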
diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml
index 3d88e6b23..0e8eb90c1 100644
--- a/playbooks/aws/openshift-cluster/add_nodes.yml
+++ b/playbooks/aws/openshift-cluster/add_nodes.yml
@@ -6,14 +6,9 @@
gather_facts: no
vars_files:
- vars.yml
- - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
vars:
oo_extend_env: True
tasks:
- - fail:
- msg: Deployment type not supported for aws provider yet
- when: deployment_type == 'enterprise'
-
- include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 9fba856a2..8402b3579 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -4,16 +4,18 @@
- ../../aws/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
- openshift_node_labels: '{"region": "{{ ec2_region }}", "type": "{{ hostvars[inventory_hostname]["ec2_tag_sub-host-type"] if inventory_hostname in groups["tag_host-type_node"] else hostvars[inventory_hostname]["ec2_tag_host-type"] }}"}'
+ openshift_node_labels:
+ region: "{{ deployment_vars[deployment_type].region }}"
+ type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] if inventory_hostname in groups['tag_host-type_node'] else hostvars[inventory_hostname]['ec2_tag_host-type'] }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
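
The sudo -> become renames here and in the hunks that follow track Ansible 2,
which deprecates the sudo/ansible_sudo keywords in favor of
become/ansible_become. A minimal sketch of the two sides of the pattern (the
origin entry is illustrative, not the full vars file):

    # vars sketch: the per-deployment flag is now named 'become'
    deployment_vars:
      origin:
        ssh_user: centos
        become: yes      # formerly 'sudo: yes'

    # task sketch: dynamically added hosts get ansible_become
    - add_host:
        name: "{{ item }}"
        groups: oo_list_hosts
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: "{{ g_all_hosts | default([]) }}"
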
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 15b83dfad..3edace493 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -6,12 +6,7 @@
gather_facts: no
vars_files:
- vars.yml
- - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
tasks:
- - fail:
- msg: Deployment type not supported for aws provider yet
- when: deployment_type == 'enterprise'
-
- include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 8b41a355e..a542b4ca3 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
index 7e3a47964..6fa9142a0 100644
--- a/playbooks/aws/openshift-cluster/scaleup.yml
+++ b/playbooks/aws/openshift-cluster/scaleup.yml
@@ -12,7 +12,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ groups.nodes_to_add }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
@@ -24,7 +24,7 @@
vars:
g_new_node_hosts: "{{ groups.nodes_to_add }}"
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml
index d5f7d6b19..f7f4812bb 100644
--- a/playbooks/aws/openshift-cluster/service.yml
+++ b/playbooks/aws/openshift-cluster/service.yml
@@ -16,7 +16,7 @@
name: "{{ item }}"
groups: g_service_masters
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ master_hosts | default([]) }}"
- name: Evaluate g_service_nodes
@@ -24,7 +24,7 @@
name: "{{ item }}"
groups: g_service_nodes
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ node_hosts | default([]) }}"
- include: ../../common/openshift-node/service.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 63be06ecf..7d5776ae6 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -8,80 +8,50 @@
sub_host_type: "{{ g_sub_host_type }}"
- set_fact:
- ec2_region: "{{ lookup('env', 'ec2_region')
- | default(deployment_vars[deployment_type].region, true) }}"
- when: ec2_region is not defined
-- set_fact:
- ec2_image_name: "{{ lookup('env', 'ec2_image_name')
- | default(deployment_vars[deployment_type].image_name, true) }}"
- when: ec2_image_name is not defined and ec2_image is not defined
-- set_fact:
- ec2_image: "{{ lookup('env', 'ec2_image')
- | default(deployment_vars[deployment_type].image, true) }}"
- when: ec2_image is not defined and not ec2_image_name
-- set_fact:
- ec2_keypair: "{{ lookup('env', 'ec2_keypair')
- | default(deployment_vars[deployment_type].keypair, true) }}"
- when: ec2_keypair is not defined
-- set_fact:
- ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
- | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
- when: ec2_vpc_subnet is not defined
-- set_fact:
- ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
- | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
- when: ec2_assign_public_ip is not defined
-
-- set_fact:
- ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
- ec2_security_groups: "{{ ec2_master_security_groups | default(lookup('env', 'ec2_master_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+ ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "master" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
- ec2_security_groups: "{{ ec2_etcd_security_groups | default(lookup('env', 'ec2_etcd_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+ ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "etcd" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
- ec2_security_groups: "{{ ec2_infra_security_groups | default(lookup('env', 'ec2_infra_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+ ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "node" and sub_host_type == "infra"
- set_fact:
- ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
- ec2_security_groups: "{{ ec2_node_security_groups | default(lookup('env', 'ec2_node_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+ ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "node" and sub_host_type == "compute"
- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
- | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_instance_type: "{{ deployment_vars[deployment_type].type }}"
when: ec2_instance_type is not defined
- set_fact:
- ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
+ ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}"
when: ec2_security_groups is not defined
- name: Find amis for deployment_type
ec2_ami_find:
- region: "{{ ec2_region }}"
- ami_id: "{{ ec2_image | default(omit, true) }}"
- name: "{{ ec2_image_name | default(omit, true) }}"
+ region: "{{ deployment_vars[deployment_type].region }}"
+ ami_id: "{{ deployment_vars[deployment_type].image }}"
+ name: "{{ deployment_vars[deployment_type].image_name }}"
register: ami_result
- fail: msg="Could not find requested ami"
when: not ami_result.results
- set_fact:
- latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
+ latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}"
volume_defs:
etcd:
root:
volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
- etcd:
- volume_size: "{{ lookup('env', 'os_etcd_vol_size') | default(32, true) }}"
- device_type: "{{ lookup('env', 'os_etcd_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_etcd_vol_iops') | default(500, true) }}"
master:
root:
volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
@@ -107,14 +77,14 @@
- name: Launch instance(s)
ec2:
state: present
- region: "{{ ec2_region }}"
- keypair: "{{ ec2_keypair }}"
- group: "{{ ec2_security_groups }}"
+ region: "{{ deployment_vars[deployment_type].region }}"
+ keypair: "{{ deployment_vars[deployment_type].keypair }}"
+ group: "{{ deployment_vars[deployment_type].security_groups }}"
instance_type: "{{ ec2_instance_type }}"
- image: "{{ latest_ami }}"
+ image: "{{ deployment_vars[deployment_type].image }}"
count: "{{ instances | length }}"
- vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
- assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
+ vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}"
+ assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}"
user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
wait: yes
instance_tags:
@@ -127,7 +97,7 @@
register: ec2
- name: Add Name tag to instances
- ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
+ ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present
with_together:
- instances
- ec2.instances
@@ -136,29 +106,32 @@
Name: "{{ item.0 }}"
- set_fact:
- instance_groups: "tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, tag_environment_{{ cluster_env }},
- tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}"
+ instance_groups: >
+ tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }},
+ tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }},
+ tag_sub-host-type_{{ sub_host_type }}
- set_fact:
node_label:
- region: "{{ec2_region}}"
+ region: "{{ deployment_vars[deployment_type].region }}"
type: "{{sub_host_type}}"
when: host_type == "node"
- set_fact:
node_label:
- region: "{{ec2_region}}"
+ region: "{{ deployment_vars[deployment_type].region }}"
type: "{{host_type}}"
when: host_type != "node"
- set_fact:
logrotate:
- name: syslog
- path: "/var/log/cron
- \n/var/log/maillog
- \n/var/log/messages
- \n/var/log/secure
- \n/var/log/spooler \n"
+ path: |
+ /var/log/cron
+ /var/log/maillog
+ /var/log/messages
+ /var/log/secure
+        /var/log/spooler
options:
- daily
- rotate 7
@@ -173,7 +146,7 @@
hostname: "{{ item.0 }}"
ansible_ssh_host: "{{ item.1.dns_name }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: "{{ instance_groups }}"
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
@@ -188,7 +161,7 @@
hostname: "{{ item.0 }}"
ansible_ssh_host: "{{ item.1.dns_name }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: nodes_to_add
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
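
The deeply nested default(lookup('env', ...)) chains above collapse to a single
environment lookup per host type. The trailing 'true' argument matters: an
unset environment variable comes back as an empty string, and default(..., true)
treats that as undefined, so the deployment_vars value wins. Isolated:

    - set_fact:
        ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type')
                               | default(deployment_vars[deployment_type].type, true) }}"
        ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups')
                               | default(deployment_vars[deployment_type].security_groups, true) }}"
      when: host_type == "master" and sub_host_type == "default"
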
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
index 3621a7d7d..2a3974a8c 100644
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -1,24 +1,4 @@
#cloud-config
-{% if type == 'etcd' and 'etcd' in volume_defs[type] %}
-cloud_config_modules:
-- disk_setup
-- mounts
-
-mounts:
-- [ xvdb, /var/lib/etcd, xfs, "defaults" ]
-
-disk_setup:
- xvdb:
- table_type: mbr
- layout: True
-
-fs_setup:
-- label: etcd_storage
- filesystem: xfs
- device: /dev/xvdb
- partition: auto
-{% endif %}
-
{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
mounts:
- [ xvdb ]
@@ -33,18 +13,7 @@ write_files:
permissions: '0644'
{% endif %}
-{% if deployment_type == 'online' %}
-devices: ['/var'] # Workaround for https://bugs.launchpad.net/bugs/1455436
-
-disable_root: 0
-growpart:
- mode: auto
- devices: ['/var']
-runcmd:
-- xfs_growfs /var
-{% endif %}
-
-{% if deployment_vars[deployment_type].sudo %}
+{% if deployment_vars[deployment_type].become %}
- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
permissions: 440
content: |
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 6dd5d8b62..fb13e1839 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost'])
- name: Unsubscribe VMs
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index 32bab76b5..bd31c42dd 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -13,7 +13,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 11026e38d..44d9a3e25 100644
--- a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -1,14 +1,13 @@
---
-# This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type.
# Usage:
-# ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id>
+# ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=<deployment_type> -e cluster_id=<cluster_id>
- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
vars_files:
- "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}"
- "{{lookup('file', '../../../../aws/openshift-cluster/cluster_hosts.yml')}}"
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
diff --git a/playbooks/aws/openshift-cluster/vars.defaults.yml b/playbooks/aws/openshift-cluster/vars.defaults.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/playbooks/aws/openshift-cluster/vars.defaults.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
deleted file mode 100644
index 2e2f25ccd..000000000
--- a/playbooks/aws/openshift-cluster/vars.online.int.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-ec2_image: ami-9101c8fa
-ec2_image_name: libra-ops-rhel7*
-ec2_region: us-east-1
-ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.medium
-ec2_master_security_groups: [ 'integration', 'integration-master' ]
-ec2_infra_instance_type: c4.large
-ec2_infra_security_groups: [ 'integration', 'integration-infra' ]
-ec2_node_instance_type: m4.large
-ec2_node_security_groups: [ 'integration', 'integration-node' ]
-ec2_etcd_instance_type: m4.large
-ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ]
-ec2_vpc_subnet: subnet-987c0def
-ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
deleted file mode 100644
index 18a53e12e..000000000
--- a/playbooks/aws/openshift-cluster/vars.online.prod.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-ec2_image: ami-9101c8fa
-ec2_image_name: libra-ops-rhel7*
-ec2_region: us-east-1
-ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.medium
-ec2_master_security_groups: [ 'production', 'production-master' ]
-ec2_infra_instance_type: c4.large
-ec2_infra_security_groups: [ 'production', 'production-infra' ]
-ec2_node_instance_type: m4.large
-ec2_node_security_groups: [ 'production', 'production-node' ]
-ec2_etcd_instance_type: m4.large
-ec2_etcd_security_groups: [ 'production', 'production-etcd' ]
-ec2_vpc_subnet: subnet-987c0def
-ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
deleted file mode 100644
index 1f9ac4252..000000000
--- a/playbooks/aws/openshift-cluster/vars.online.stage.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-ec2_image: ami-9101c8fa
-ec2_image_name: libra-ops-rhel7*
-ec2_region: us-east-1
-ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.medium
-ec2_master_security_groups: [ 'stage', 'stage-master' ]
-ec2_infra_instance_type: c4.large
-ec2_infra_security_groups: [ 'stage', 'stage-infra' ]
-ec2_node_instance_type: m4.large
-ec2_node_security_groups: [ 'stage', 'stage-node' ]
-ec2_etcd_instance_type: m4.large
-ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ]
-ec2_vpc_subnet: subnet-987c0def
-ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index ae12286bd..8bda72ac2 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -3,42 +3,31 @@ debug_level: 2
deployment_rhel7_ent_base:
# rhel-7.1, requires cloud access subscription
- image: ami-10663b78
- image_name:
- region: us-east-1
+ image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', True) }}"
+ image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
+ region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
ssh_user: ec2-user
- sudo: yes
- keypair: libra
- type: m4.large
- security_groups: [ 'public' ]
- vpc_subnet:
- assign_public_ip:
+ become: yes
+ keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
+ type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
+ security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
+ vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
+ assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
deployment_vars:
origin:
# centos-7, requires marketplace
- image: ami-61bbf104
- image_name:
- region: us-east-1
+ image: "{{ lookup('oo_option', 'ec2_image') | default('ami-61bbf104', True) }}"
+ image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
+ region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
ssh_user: centos
- sudo: yes
- keypair: libra
- type: m4.large
- security_groups: [ 'public' ]
- vpc_subnet:
- assign_public_ip:
- online:
- # private ami
- image: ami-7a9e9812
- image_name: openshift-rhel7_*
- region: us-east-1
- ssh_user: root
- sudo: no
- keypair: libra
- type: m4.large
- security_groups: [ 'public' ]
- vpc_subnet:
- assign_public_ip:
+ become: yes
+ keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
+ type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
+ security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
+ vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
+ assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
+
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
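
With every EC2 default now routed through lookup('oo_option', ...), any of them
can be overridden at invocation time (for example -e ec2_region=us-west-2
-e ec2_instance_type=m4.xlarge) instead of maintaining per-environment vars
files; the exact resolution order lives in lookup_plugins/oo_option.py. The
pattern, isolated:

    # Capitalized True makes default() also fire on an empty lookup result,
    # so an unset option falls back to the literal.
    region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
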
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 916dfd0a6..b694b6c08 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,4 +1,14 @@
---
+- name: Cluster hosts
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: openshift-cluster/cluster_hosts.yml
+
+- include: ../common/openshift-cluster/evaluate_groups.yml
+
- name: Gather Cluster facts
hosts: OSEv3
roles:
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index a7fc270ac..5ed1d3b3c 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -48,3 +48,5 @@
- role: openshift_registry
registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
when: deploy_infra | bool and attach_registry_volume | bool
+ - role: openshift_metrics
+ when: openshift.hosted.metrics.deploy | bool
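
Metrics deployment now hangs entirely off the openshift.hosted.metrics.deploy
fact. Pairing this hunk with the fact seeding added to
playbooks/common/openshift-master/config.yml further down in this diff, the
flow is roughly:

    # 1. Seed the variable from an oo_option unless the inventory set it
    - set_fact:
        openshift_hosted_metrics_deploy: "{{ lookup('oo_option', 'openshift_hosted_metrics_deploy') | default(false, true) }}"
      when: openshift_hosted_metrics_deploy is not defined

    # 2. Apply the role only when the resulting fact is truthy
    - role: openshift_metrics
      when: openshift.hosted.metrics.deploy | bool
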
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 6f908fa7f..99b36098a 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -4,7 +4,7 @@
- include: validate_hostnames.yml
- name: Set oo_options
- hosts: oo_hosts_to_config
+ hosts: oo_all_hosts
tasks:
- set_fact:
openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index ce0134c44..c5273b08f 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -34,15 +34,15 @@
name: "{{ item }}"
groups: oo_all_hosts
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: g_all_hosts | default([])
- name: Evaluate oo_masters
add_host:
name: "{{ item }}"
groups: oo_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
- name: Evaluate oo_etcd_to_config
@@ -50,7 +50,7 @@
name: "{{ item }}"
groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_etcd_hosts | default([]) }}"
- name: Evaluate oo_masters_to_config
@@ -58,7 +58,7 @@
name: "{{ item }}"
groups: oo_masters_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
- name: Evaluate oo_nodes_to_config
@@ -66,7 +66,7 @@
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
# Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
@@ -75,7 +75,7 @@
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
@@ -91,7 +91,7 @@
name: "{{ g_master_hosts[0] }}"
groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
when: g_master_hosts|length > 0
- name: Evaluate oo_lb_to_config
@@ -99,7 +99,7 @@
name: "{{ item }}"
groups: oo_lb_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_lb_hosts | default([]) }}"
- name: Evaluate oo_nfs_to_config
@@ -107,5 +107,5 @@
name: "{{ item }}"
groups: oo_nfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_nfs_hosts | default([]) }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
index 63c8ef756..51b108f6a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -42,10 +42,10 @@
- name: Update cluster policy
hosts: oo_first_master
tasks:
- - name: oadm policy reconcile-cluster-roles --confirm
+ - name: oadm policy reconcile-cluster-roles --additive-only=true --confirm
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
- name: Upgrade default router
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 31ba8c4a9..a72749a2b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -490,7 +490,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
index 54bb251f7..5e62b43a3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -103,7 +103,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
index 2bd625257..319758a06 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
@@ -1,7 +1,7 @@
- include_vars: ../../../../../roles/openshift_node/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_aos_versions.avail_version }}
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
- name: Verifying the correct version was configured
shell: grep {{ verify_upgrade_version }} {{ item }}
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
new file mode 100644
index 000000000..a911f12be
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
@@ -0,0 +1,24 @@
+- name: Prepare for Node evacuation
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+- include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+- name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: openshift.node.schedulable | bool
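
This new file is consumed one host at a time (note the serial: 1 added to
upgrade.yml below), so each node is cordoned, evacuated, upgraded, and made
schedulable again before the next one starts. The calling play, in sketch:

    - name: Upgrade nodes
      hosts: oo_nodes_to_config
      serial: 1      # limit capacity loss to a single node at a time
      roles:
      - openshift_facts
      tasks:
      - include: node_upgrade.yml
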
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
index 21480ba55..1dd47dc15 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -2,10 +2,11 @@
###############################################################################
# Evaluate host groups and gather facts
###############################################################################
-- name: Load openshift_facts
+- name: Load openshift_facts and update repos
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
roles:
- openshift_facts
+ - openshift_repos
- name: Evaluate additional groups for upgrade
hosts: localhost
@@ -100,7 +101,7 @@
vars:
target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
openshift_docker_hosted_registry_insecure: True
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.master.portal_net }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
handlers:
- include: ../../../../../roles/openshift_master/handlers/main.yml
- include: ../../../../../roles/openshift_node/handlers/main.yml
@@ -161,14 +162,14 @@
when: inventory_hostname in groups.oo_masters_to_config
- name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_aos_versions.curr_version }}
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
when: inventory_hostname in groups.oo_masters_to_config
- include_vars: ../../../../../roles/openshift_node/vars/main.yml
when: inventory_hostname in groups.oo_nodes_to_config
- name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{g_aos_versions.curr_version}}
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
when: inventory_hostname in groups.oo_nodes_to_config
# Note: the version number is hardcoded here in hopes of catching potential
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
index 7a2718e1b..5c96ad094 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -4,3 +4,6 @@
- name: Ensure python-yaml present for config upgrade
action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
when: not openshift.common.is_atomic | bool
+
+- name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index 8852bb8de..a91727ecd 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -50,7 +50,7 @@
- include_vars: ../../../../../roles/openshift_master/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_aos_versions.avail_version }}
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
# - name: Upgrade master configuration
# openshift_upgrade_config:
@@ -88,23 +88,13 @@
###############################################################################
- name: Upgrade nodes
hosts: oo_nodes_to_config
+ serial: 1
roles:
- openshift_facts
handlers:
- include: ../../../../../roles/openshift_node/handlers/main.yml
tasks:
- - include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
-
- - include: containerized_upgrade.yml
- when: openshift.common.is_containerized | bool
-
- # This will restart the node
- - name: Restart openvswitch service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
+ - include: node_upgrade.yml
- set_fact:
node_update_complete: True
@@ -130,6 +120,7 @@
###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################
+
- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
hosts: oo_masters_to_config
roles:
@@ -138,7 +129,7 @@
origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
ent_reconcile_bindings: true
openshift_docker_hosted_registry_insecure: True
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.master.portal_net }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
tasks:
- name: Verifying the correct commandline tools are available
shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
@@ -147,7 +138,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
@@ -164,7 +155,7 @@
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm
+ {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
run_once: true
- set_fact:
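
Every reconcile invocation in the upgrade paths now passes --additive-only=true,
so reconciliation restores missing default rules without stripping permissions
an administrator granted by hand. Isolated, the task reads:

    - name: Reconcile Cluster Roles
      command: >
        {{ openshift.common.admin_binary }}
        --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-roles --additive-only=true --confirm
      run_once: true
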
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 2f07b2f51..6cb3a954f 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -22,6 +22,8 @@
etcd_cert_subdir: etcd-{{ openshift.common.hostname }}
etcd_cert_config_dir: /etc/etcd
etcd_cert_prefix:
+ etcd_hostname: "{{ openshift.common.hostname }}"
+ etcd_ip: "{{ openshift.common.ip }}"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -43,7 +45,7 @@
| oo_filter_list(filter_attr='etcd_server_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
roles:
- - etcd_certificates
+ - openshift_etcd_certificates
post_tasks:
- name: Create a tarball of the etcd certs
command: >
@@ -51,7 +53,7 @@
-C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
args:
creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: etcd_needing_server_certs
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Retrieve the etcd cert tarballs
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
@@ -59,7 +61,7 @@
flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: etcd_needing_server_certs
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
# Configure a first etcd host to avoid conflicts in choosing a leader
# if other members come online too quickly.
@@ -69,7 +71,7 @@
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
etcd_url_scheme: https
etcd_peer_url_scheme: https
- etcd_peers_group: oo_etcd_to_config
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -91,7 +93,7 @@
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
etcd_url_scheme: https
etcd_peer_url_scheme: https
- etcd_peers_group: oo_etcd_to_config
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index f16db0062..1f5e3aaff 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -35,6 +35,19 @@
- set_fact:
openshift_master_debug_level: "{{ t_oo_option_master_debug_level }}"
when: openshift_master_debug_level is not defined and t_oo_option_master_debug_level != ""
+
+ - set_fact:
+ openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+ when: openshift_master_default_subdomain is not defined
+ - set_fact:
+ openshift_hosted_metrics_deploy: "{{ lookup('oo_option', 'openshift_hosted_metrics_deploy') | default(false, true) }}"
+ when: openshift_hosted_metrics_deploy is not defined
+ - set_fact:
+ openshift_hosted_metrics_duration: "{{ lookup('oo_option', 'openshift_hosted_metrics_duration') | default(7) }}"
+ when: openshift_hosted_metrics_duration is not defined
+ - set_fact:
+ openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default(10) }}"
+ when: openshift_hosted_metrics_resolution is not defined
roles:
- openshift_facts
post_tasks:
@@ -53,7 +66,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- openshift_facts:
@@ -75,6 +87,8 @@
etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: master.etcd-
+ etcd_hostname: "{{ openshift.common.hostname }}"
+ etcd_ip: "{{ openshift.common.ip }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- name: Create temp directory for syncing certs
@@ -98,7 +112,7 @@
| oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
- - etcd_certificates
+ - openshift_etcd_certificates
post_tasks:
- name: Create a tarball of the etcd certs
command: >
@@ -106,7 +120,7 @@
-C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
args:
creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- name: Retrieve the etcd cert tarballs
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
@@ -114,7 +128,7 @@
flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- name: Copy the external etcd certs to the masters
hosts: oo_masters_to_config
@@ -164,7 +178,7 @@
- name: Check status of master certificates
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
- with_items: openshift_master_certs
+ with_items: "{{ openshift_master_certs }}"
register: g_master_cert_stat_result
- set_fact:
master_certs_missing: "{{ False in (g_master_cert_stat_result.results
@@ -199,7 +213,7 @@
state: absent
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
with_nested:
- - masters_needing_certs
+ - "{{ masters_needing_certs | default([]) }}"
- - master.etcd-client.crt
- master.etcd-client.key
@@ -209,7 +223,7 @@
-C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
args:
creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- with_items: masters_needing_certs
+ with_items: "{{ masters_needing_certs | default([]) }}"
- name: Retrieve the master cert tarball from the master
fetch:
@@ -218,7 +232,7 @@
flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: masters_needing_certs
+ with_items: "{{ masters_needing_certs | default([]) }}"
- name: Configure load balancers
hosts: oo_lb_to_config
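
The repeated with_items quoting changes in this file address Ansible 2's
deprecation of bare variable references in loops; the added default([]) also
keeps each loop a no-op when certificate generation was skipped and the fact
was never set. The pattern, isolated:

    # deprecated in Ansible 2:  with_items: masters_needing_certs
    - fetch:
        src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
        dest: "{{ sync_tmpdir }}/"
        flat: yes
      with_items: "{{ masters_needing_certs | default([]) }}"
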
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 02449e40d..57a63cfee 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -97,7 +97,7 @@
name: "{{ item }}"
groups: oo_active_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_masters_to_config | default([]) }}"
when: (hostvars[item]['is_active'] | default(false)) | bool
- name: Evaluate oo_current_masters
@@ -105,7 +105,7 @@
name: "{{ item }}"
groups: oo_current_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_masters_to_config | default([]) }}"
when: (hostvars[item]['current_host'] | default(false)) | bool
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 21fa37d7a..f0bb91568 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -66,7 +66,7 @@
-C {{ item.config_dir }} .
args:
creates: "{{ item.config_dir }}.tgz"
- with_items: nodes_needing_certs
+ with_items: "{{ nodes_needing_certs | default([]) }}"
- name: Retrieve the node config tarballs from the master
fetch:
@@ -75,7 +75,7 @@
flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: nodes_needing_certs
+ with_items: "{{ nodes_needing_certs | default([]) }}"
- name: Deploy node certificates
hosts: oo_nodes_to_config
@@ -105,7 +105,7 @@
name: "{{ item }}"
groups: oo_containerized_master_nodes
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
@@ -118,8 +118,8 @@
# TODO: configure these based on
# hostvars[groups.oo_first_master.0].openshift.hosted.registry instead of
# hardcoding
- openshift_docker_hosted_registry_insecure: True
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.master.portal_net }}"
+ openshift_docker_hosted_registry_insecure: "{{ openshift.docker.hosted_registry_insecure | default(True) }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
roles:
- openshift_node
@@ -131,8 +131,8 @@
# TODO: configure these based on
# hostvars[groups.oo_first_master.0].openshift.hosted.registry instead of
# hardcoding
- openshift_docker_hosted_registry_insecure: True
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.master.portal_net }}"
+ openshift_docker_hosted_registry_insecure: "{{ openshift.docker.hosted_registry_insecure | default(True) }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
roles:
- openshift_node
@@ -154,6 +154,8 @@
etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
etcd_cert_prefix: node.etcd-
+ etcd_hostname: "{{ openshift.common.hostname }}"
+ etcd_ip: "{{ openshift.common.ip }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- name: Configure flannel etcd certificates
@@ -168,7 +170,7 @@
| oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
roles:
- - role: etcd_certificates
+ - role: openshift_etcd_certificates
when: openshift_use_flannel | default(false) | bool
post_tasks:
- name: Create a tarball of the etcd flannel certs
@@ -177,7 +179,7 @@
-C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
args:
creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- name: Retrieve the etcd cert tarballs
fetch:
@@ -186,7 +188,7 @@
flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- name: Copy the external etcd flannel certs to the nodes
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index ba37a3a1f..475d29293 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -5,14 +5,14 @@
- ../../gce/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 992033d16..c29cac272 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
- name: List Hosts
diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml
index 914f38c1f..13b267976 100644
--- a/playbooks/gce/openshift-cluster/service.yml
+++ b/playbooks/gce/openshift-cluster/service.yml
@@ -15,14 +15,14 @@
name: "{{ item }}"
groups: g_service_nodes
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
- add_host:
name: "{{ item }}"
groups: g_service_masters
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
- include: ../../common/openshift-node/service.yml
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 8ebf71cd4..e3efd8566 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -39,7 +39,7 @@
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.public_ip }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index d835c53ba..6a0ac088a 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost'])
- name: Unsubscribe VMs
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 2dc540978..9b7a2777a 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -13,7 +13,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index d173213fc..13c754c1e 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -5,19 +5,14 @@ deployment_rhel7_ent_base:
image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}"
machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- sudo: yes
+ become: yes
deployment_vars:
origin:
image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}"
machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- sudo: yes
- online:
- image: libra-rhel7
- machine_type: n1-standard-1
- ssh_user: root
- sudo: no
+ become: yes
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
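
Each provider's deployment_vars now carries become in place of sudo, tracking Ansible's rename of its privilege-escalation keywords, and the retired online deployment type disappears from every vars file. A sketch of a custom entry under the new scheme (image and user values are placeholders, not shipped defaults):

    deployment_vars:
      origin:
        image: centos-7          # placeholder image name
        machine_type: n1-standard-1
        ssh_user: cloud-user     # placeholder SSH user
        become: yes              # replaces the removed 'sudo: yes'
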
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 0e003ef67..81a6fff0d 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -8,13 +8,13 @@
- ../../libvirt/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index 701d57d26..2475b9d6b 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -12,9 +12,6 @@
image_name: "{{ deployment_vars[deployment_type].image.name }}"
image_compression: "{{ deployment_vars[deployment_type].image.compression }}"
tasks:
- - fail: msg="Deployment type not supported for libvirt provider yet"
- when: deployment_type == 'online'
-
- include: tasks/configure_libvirt.yml
- include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 6cb81ee79..eb64544db 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml
index cd07c8701..8bd24a8cf 100644
--- a/playbooks/libvirt/openshift-cluster/service.yml
+++ b/playbooks/libvirt/openshift-cluster/service.yml
@@ -18,7 +18,7 @@
add_host:
name: "{{ item }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: g_service_masters
with_items: "{{ g_master_hosts | default([]) }}"
@@ -26,7 +26,7 @@
add_host:
name: "{{ item }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: g_service_nodes
with_items: "{{ g_node_hosts | default([]) }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 937a765fa..7231f255a 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -1,7 +1,7 @@
---
# TODO: Add support for choosing base image based on deployment_type and os
# wanted (os wanted needs support added in bin/cluster with sane defaults:
-# fedora/centos for origin, rhel for online/enterprise)
+# fedora/centos for origin, rhel for enterprise)
# TODO: create a role to encapsulate some of this complexity, possibly also
# create a module to manage the storage tasks, network tasks, and possibly
@@ -113,7 +113,7 @@
hostname: '{{ item.0 }}'
ansible_ssh_host: '{{ item.1 }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
openshift_node_labels: "{{ node_label }}"
with_together:
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index d6251ac83..baef911f9 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -14,7 +14,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[cluster_group] | default([])
- name: Unsubscribe VMs
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 2dc540978..9b7a2777a 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -13,7 +13,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index aa0c69e08..4daaf1c91 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -20,7 +20,7 @@ deployment_rhel7_ent_base:
default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
compression: ""
ssh_user: openshift
- sudo: yes
+ become: yes
deployment_vars:
origin:
@@ -34,14 +34,7 @@ deployment_vars:
sha256: "{{ lookup('oo_option', 'image_sha256') |
default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
ssh_user: openshift
- sudo: yes
- online:
- image:
- url:
- name:
- sha256:
- ssh_user: root
- sudo: no
+ become: yes
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 093beaf03..9c0ca9af9 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -6,12 +6,12 @@
vars:
g_nodeonmaster: true
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
diff --git a/playbooks/openstack/openshift-cluster/dns.yml b/playbooks/openstack/openshift-cluster/dns.yml
index 5e7671a48..31113d5f0 100644
--- a/playbooks/openstack/openshift-cluster/dns.yml
+++ b/playbooks/openstack/openshift-cluster/dns.yml
@@ -12,7 +12,7 @@
name: "{{ item }}"
groups: oo_dns_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ groups[cluster_id ~ '-dns'] }}"
- name: Evaluate oo_hosts_to_add_in_dns
@@ -20,7 +20,7 @@
name: "{{ item }}"
groups: oo_hosts_to_add_in_dns
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ groups['tag_clusterid_' ~ cluster_id] }}"
- name: Gather facts
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index dd6a22cbe..2f05c3adc 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -350,7 +350,6 @@ resources:
port_range_min: 10250
port_range_max: 10250
remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
- direction: ingress
protocol: udp
port_range_min: 4789
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 0afcad72e..3d4fe42d0 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -7,10 +7,6 @@
vars_files:
- vars.yml
tasks:
- - fail:
- msg: "Deployment type not supported for OpenStack provider yet"
- when: deployment_type == 'online'
-
# TODO: Write an Ansible module for dealing with HEAT stacks
# Dealing with the outputs is currently terrible
@@ -106,7 +102,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_etcd, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "etcd"
@@ -120,7 +116,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_master, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "master"
@@ -134,7 +130,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_compute, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "compute"
@@ -148,7 +144,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_infra, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "infra"
@@ -162,7 +158,7 @@
hostname: '{{ parsed_outputs.dns_name }}'
ansible_ssh_host: '{{ parsed_outputs.dns_floating_ip }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: '{{ cluster_id }}-dns'
- name: Wait for ssh
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index 123ebd323..ba9c6bf9c 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -16,7 +16,7 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index a1fb41b53..5bd8476f1 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -10,7 +10,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([]))
- name: Unsubscribe VMs
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 16027b15c..539af6524 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index ee26d223e..84cba0506 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -21,17 +21,13 @@ openstack_flavor:
deployment_rhel7_ent_base:
image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
ssh_user: openshift
- sudo: yes
+ become: yes
deployment_vars:
origin:
image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
ssh_user: openshift
- sudo: yes
- online:
- image:
- ssh_user: root
- sudo: no
+ become: yes
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 878d5fea8..e4a31c692 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -22,7 +22,7 @@
- name: Install docker
action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present"
- when: not openshift.common.is_atomic | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'lt')
+ when: not openshift.common.is_atomic | bool and docker_downgrade_result | skipped
# If docker was enabled and started before we downgraded it, it may have
# entered a failed state. Check for that and clear it if necessary.
@@ -69,6 +69,22 @@
reg_flag: --insecure-registry
notify:
- restart docker
+
+- name: Set Proxy Settings
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^{{ item.reg_conf_var }}=.*$'
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
+ state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
+ with_items:
+ - reg_conf_var: HTTP_PROXY
+ reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ - reg_conf_var: HTTPS_PROXY
+ reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ - reg_conf_var: NO_PROXY
+ reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
+ notify:
+ - restart docker
- name: Set various docker options
lineinfile:
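
The Set Proxy Settings task manages one line per variable in /etc/sysconfig/docker and flips state to absent when a value is empty, so clearing a proxy in the inventory also removes the stale entry. A hedged sketch of the flow with hypothetical proxy values:

    # facts fed in by openshift_docker_facts (values are examples only)
    docker_http_proxy: http://proxy.example.com:3128
    docker_https_proxy: http://proxy.example.com:3128
    docker_no_proxy: [localhost, 172.30.0.0/16]
    # lines the task then keeps present in /etc/sysconfig/docker:
    #   HTTP_PROXY='http://proxy.example.com:3128'
    #   HTTPS_PROXY='http://proxy.example.com:3128'
    #   NO_PROXY='localhost,172.30.0.0/16'
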
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index e6b10cab7..a2212bacd 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,6 +1,5 @@
---
-etcd_service: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}"
-etcd_interface: "{{ ansible_default_ipv4.interface }}"
+etcd_service: "{{ 'etcd' if not etcd_is_containerized | bool else 'etcd_container' }}"
etcd_client_port: 2379
etcd_peer_port: 2380
etcd_url_scheme: http
@@ -9,10 +8,10 @@ etcd_peer_url_scheme: http
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
-etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
-etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
-etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
-etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
+etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
+etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
+etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
+etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_data_dir: /var/lib/etcd/
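
With etcd_interface gone, all four URL defaults derive from a single etcd_ip fact (defaulted in etcd_common, overridable per host) instead of dereferencing hostvars by interface name. A sketch of the resulting values for one host, assuming etcd_ip is 192.0.2.10 and the default schemes and ports above:

    # etcd_listen_client_urls -> http://192.0.2.10:2379
    # etcd_listen_peer_urls   -> http://192.0.2.10:2380
    # pin a host to an address by setting etcd_ip in inventory, e.g.:
    #   etcd1.example.com etcd_ip=192.0.2.10
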
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 36906b347..a71b36237 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -16,6 +16,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_docker }
- { role: os_firewall }
- { role: etcd_common }
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index afec6b30b..a798dc973 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,36 +1,35 @@
---
-- fail:
- msg: Interface {{ etcd_interface }} not found
- when: "'ansible_' ~ etcd_interface not in hostvars[inventory_hostname]"
-
-- fail:
- msg: IPv4 address not found for {{ etcd_interface }}
- when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4"
+- name: Set hostname and ip facts
+ set_fact:
+ # Store etcd_hostname and etcd_ip such that they will be available
+ # in hostvars. Defaults for these variables are set in etcd_common.
+ etcd_hostname: "{{ etcd_hostname }}"
+ etcd_ip: "{{ etcd_ip }}"
- name: Install etcd
action: "{{ ansible_pkg_mgr }} name=etcd state=present"
- when: not openshift.common.is_containerized | bool
+ when: not etcd_is_containerized | bool
- name: Pull etcd container
command: docker pull {{ openshift.etcd.etcd_image }}
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
- name: Install etcd container service file
template:
dest: "/etc/systemd/system/etcd_container.service"
src: etcd.docker.service
register: install_etcd_result
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
- name: Ensure etcd datadir exists
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
file:
path: "{{ etcd_data_dir }}"
state: directory
mode: 0700
- name: Disable system etcd when containerized
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
service:
name: etcd
state: stopped
@@ -42,27 +41,27 @@
changed_when: false
- name: Mask system etcd when containerized
- when: openshift.common.is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
+ when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
command: systemctl mask etcd
- name: Reload systemd units
command: systemctl daemon-reload
- when: openshift.common.is_containerized | bool and ( install_etcd_result | changed )
+ when: etcd_is_containerized | bool and ( install_etcd_result | changed )
- name: Validate permissions on the config dir
file:
path: "{{ etcd_conf_dir }}"
state: directory
- owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
- group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+ owner: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
+ group: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
mode: 0700
- name: Validate permissions on certificate files
file:
path: "{{ item }}"
mode: 0600
- owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
- group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+ owner: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
+ group: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
when: etcd_url_scheme == 'https'
with_items:
- "{{ etcd_ca_file }}"
@@ -73,8 +72,8 @@
file:
path: "{{ item }}"
mode: 0600
- owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
- group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+ owner: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
+ group: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
when: etcd_peer_url_scheme == 'https'
with_items:
- "{{ etcd_peer_ca_file }}"
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 28816fd87..cd048ec60 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -1,15 +1,15 @@
{% macro initial_cluster() -%}
-{% for host in groups[etcd_peers_group] -%}
+{% for host in etcd_peers -%}
{% if loop.last -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }}
+{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }}
{%- else -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }},
+{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }},
{%- endif -%}
{% endfor -%}
{% endmacro -%}
-{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
-ETCD_NAME={{ inventory_hostname }}
+{% if etcd_peers | default([]) | length > 1 %}
+ETCD_NAME={{ etcd_hostname }}
ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
{% else %}
ETCD_NAME=default
@@ -23,7 +23,7 @@ ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
#ETCD_MAX_WALS=5
#ETCD_CORS=
-{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
+{% if etcd_peers | default([]) | length > 1 %}
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
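
The initial_cluster macro now iterates an explicit etcd_peers list and reads each peer's etcd_hostname and etcd_ip from hostvars rather than the removed etcd_host_int_map. For two hypothetical peers the rendered output would be:

    # ETCD_NAME=etcd1.example.com
    # ETCD_INITIAL_CLUSTER=etcd1.example.com=http://192.0.2.10:2380,etcd2.example.com=http://192.0.2.11:2380
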
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index cf7bc00a3..e1bb9baed 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Install openssl
action: "{{ ansible_pkg_mgr }} name=openssl state=present"
- when: not openshift.common.is_atomic | bool
+ when: not etcd_is_atomic | bool
- file:
path: "{{ item }}"
diff --git a/roles/etcd_certificates/tasks/client.yml b/roles/etcd_certificates/tasks/client.yml
index 6aa4883e0..b497a46c0 100644
--- a/roles/etcd_certificates/tasks/client.yml
+++ b/roles/etcd_certificates/tasks/client.yml
@@ -4,7 +4,7 @@
path: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
state: directory
mode: 0700
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- name: Create the client csr
command: >
@@ -12,14 +12,14 @@
-config {{ etcd_openssl_conf }}
-out {{ item.etcd_cert_prefix }}client.csr
-reqexts {{ etcd_req_ext }} -batch -nodes
- -subj /CN={{ item.openshift.common.hostname }}
+ -subj /CN={{ item.etcd_hostname }}
args:
chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'client.csr' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
- with_items: etcd_needing_client_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- name: Sign and create the client crt
command: >
@@ -32,11 +32,11 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'client.crt' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
- with_items: etcd_needing_client_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- file:
src: "{{ etcd_ca_cert }}"
dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt"
state: hard
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
diff --git a/roles/etcd_certificates/tasks/main.yml b/roles/etcd_certificates/tasks/main.yml
index 3bb715943..17092ca58 100644
--- a/roles/etcd_certificates/tasks/main.yml
+++ b/roles/etcd_certificates/tasks/main.yml
@@ -1,6 +1,6 @@
---
- include: client.yml
- when: etcd_needing_client_certs is defined and etcd_needing_client_certs
+ when: etcd_needing_client_certs | default([]) | length > 0
- include: server.yml
- when: etcd_needing_server_certs is defined and etcd_needing_server_certs
+ when: etcd_needing_server_certs | default([]) | length > 0
diff --git a/roles/etcd_certificates/tasks/server.yml b/roles/etcd_certificates/tasks/server.yml
index 3499dcbef..934b8b805 100644
--- a/roles/etcd_certificates/tasks/server.yml
+++ b/roles/etcd_certificates/tasks/server.yml
@@ -4,7 +4,7 @@
path: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
state: directory
mode: 0700
- with_items: etcd_needing_server_certs
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Create the server csr
command: >
@@ -12,14 +12,14 @@
-config {{ etcd_openssl_conf }}
-out {{ item.etcd_cert_prefix }}server.csr
-reqexts {{ etcd_req_ext }} -batch -nodes
- -subj /CN={{ item.openshift.common.hostname }}
+ -subj /CN={{ item.etcd_hostname }}
args:
chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.csr' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
- with_items: etcd_needing_server_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Sign and create the server crt
command: >
@@ -32,8 +32,8 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.crt' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
- with_items: etcd_needing_server_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Create the peer csr
command: >
@@ -41,14 +41,14 @@
-config {{ etcd_openssl_conf }}
-out {{ item.etcd_cert_prefix }}peer.csr
-reqexts {{ etcd_req_ext }} -batch -nodes
- -subj /CN={{ item.openshift.common.hostname }}
+ -subj /CN={{ item.etcd_hostname }}
args:
chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.csr' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
- with_items: etcd_needing_server_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Sign and create the peer crt
command: >
@@ -61,11 +61,11 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.crt' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
- with_items: etcd_needing_server_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- file:
src: "{{ etcd_ca_cert }}"
dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt"
state: hard
- with_items: etcd_needing_server_certs
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 3af509448..1ff1d6ef8 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,6 +1,4 @@
---
-etcd_peers_group: oo_etcd_to_config
-
# etcd server vars
etcd_conf_dir: /etc/etcd
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
@@ -28,3 +26,9 @@ etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
etcd_ca_default_days: 365
+
+# etcd server & certificate vars
+etcd_hostname: "{{ inventory_hostname }}"
+etcd_ip: "{{ ansible_default_ipv4.address }}"
+etcd_is_atomic: False
+etcd_is_containerized: False
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
deleted file mode 100644
index be75fdab2..000000000
--- a/roles/etcd_common/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- set_fact:
- etcd_host_int_map: "{{ lookup('template', '../templates/host_int_map.j2') | from_yaml }}"
-
-- fail:
- msg: "Interface {{ item.value.etcd_interface }} not found on host {{ item.key }}"
- when: "'etcd_interface' in item.value and 'interface' not in item.value"
- with_dict: etcd_host_int_map | default({})
-
-- fail:
- msg: IPv4 address not found for {{ item.value.interface.device }} on host {{ item.key }}
- when: "'ipv4' not in item.value.interface or 'address' not in item.value.interface.ipv4"
- with_dict: etcd_host_int_map | default({})
diff --git a/roles/etcd_common/templates/host_int_map.j2 b/roles/etcd_common/templates/host_int_map.j2
deleted file mode 100644
index 9c9c76413..000000000
--- a/roles/etcd_common/templates/host_int_map.j2
+++ /dev/null
@@ -1,13 +0,0 @@
----
-{% for host in groups[etcd_peers_group] %}
-{% set entry=hostvars[host] %}
-{{ entry.inventory_hostname }}:
-{% if 'etcd_interface' in entry %}
- etcd_interface: {{ entry.etcd_interface }}
-{% if entry.etcd_interface in entry.ansible_interfaces %}
- interface: {{ entry['ansible_' ~ entry.etcd_interface] | to_json }}
-{% endif %}
-{% else %}
- interface: {{ entry['ansible_' ~ entry.ansible_default_ipv4.interface] | to_json }}
-{% endif %}
-{% endfor %}
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index f9b9ae7f1..981ea5c7a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -1,8 +1,8 @@
---
- name: restart flanneld
- sudo: true
+ become: yes
service: name=flanneld state=restarted
- name: restart docker
- sudo: true
+ become: yes
service: name=docker state=restarted
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index aa27b674e..6b6dfb423 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -1,11 +1,11 @@
---
- name: Install flannel
- sudo: true
+ become: yes
action: "{{ ansible_pkg_mgr }} name=flannel state=present"
when: not openshift.common.is_containerized | bool
- name: Set flannel etcd url
- sudo: true
+ become: yes
lineinfile:
dest: /etc/sysconfig/flanneld
backrefs: yes
@@ -13,7 +13,7 @@
line: '\1{{ etcd_hosts|join(",") }}'
- name: Set flannel etcd key
- sudo: true
+ become: yes
lineinfile:
dest: /etc/sysconfig/flanneld
backrefs: yes
@@ -21,7 +21,7 @@
line: '\1{{ flannel_etcd_key }}'
- name: Set flannel options
- sudo: true
+ become: yes
lineinfile:
dest: /etc/sysconfig/flanneld
backrefs: yes
@@ -29,7 +29,7 @@
line: '\1--iface {{ flannel_interface }} --etcd-cafile={{ etcd_peer_ca_file }} --etcd-keyfile={{ etcd_peer_key_file }} --etcd-certfile={{ etcd_peer_cert_file }}'
- name: Enable flanneld
- sudo: true
+ become: yes
service:
name: flanneld
state: started
@@ -37,7 +37,7 @@
register: start_result
- name: Remove docker bridge ip
- sudo: true
+ become: yes
shell: ip a del `ip a show docker0 | grep "inet[[:space:]]" | awk '{print $2}'` dev docker0
notify:
- restart docker
diff --git a/roles/flannel_register/README.md b/roles/flannel_register/README.md
index ba7541ab1..623c4c7cf 100644
--- a/roles/flannel_register/README.md
+++ b/roles/flannel_register/README.md
@@ -14,7 +14,7 @@ Role Variables
| Name | Default value | Description |
|---------------------|----------------------------------------------------|-------------------------------------------------|
-| flannel_network | {{ openshift.master.portal_net }} or 172.16.1.1/16 | interface to use for inter-host communication |
+| flannel_network | {{ openshift.common.portal_net }} or 172.30.0.0/16 | CIDR of the overlay network used for inter-host pod communication |
| flannel_min_network | {{ min_network }} or 172.16.5.0 | beginning of IP range for the subnet allocation |
| flannel_subnet_len | 24 | size of the subnet allocated to each host |
| flannel_etcd_key | /openshift.com/network | etcd prefix |
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 269d1a17c..b1279aa88 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,5 +1,5 @@
---
-flannel_network: "{{ openshift.master.portal_net | default('172.30.0.0/16', true) }}"
+flannel_network: "{{ openshift.common.portal_net | default('172.30.0.0/16', true) }}"
flannel_min_network: 172.30.5.0
flannel_subnet_len: 24
flannel_etcd_key: /openshift.com/network
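
flannel_network now reads the service CIDR from openshift.common because the portal_net fact moved out of the master role. These defaults feed the JSON document that flannel_register pushes into etcd; a hedged sketch of its shape (the template itself is outside this diff):

    # value stored at /openshift.com/network/config:
    #   {"Network": "172.30.0.0/16", "SubnetLen": 24, "SubnetMin": "172.30.5.0"}
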
diff --git a/roles/flannel_register/tasks/main.yml b/roles/flannel_register/tasks/main.yml
index 1629157c8..845b7ef40 100644
--- a/roles/flannel_register/tasks/main.yml
+++ b/roles/flannel_register/tasks/main.yml
@@ -1,14 +1,14 @@
---
- name: Ensure /etc/flannel dir exists
- sudo: true
+ become: yes
file: path=/etc/flannel state=directory
- name: Generate flannel configuration for etcd
- sudo: true
+ become: yes
template:
src: "flannel-config.json"
dest: "/etc/flannel/config.json"
- name: Insert flannel configuration into etcd
- sudo: true
+ become: yes
command: 'curl -L --cacert "{{ etcd_peer_ca_file }}" --cert "{{ etcd_peer_cert_file }}" --key "{{ etcd_peer_key_file }}" "{{ etcd_hosts[0] }}/v2/keys{{ flannel_etcd_key }}/config" -XPUT --data-urlencode value@/etc/flannel/config.json'
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
index 1520f79b2..dd91ad8b1 100644
--- a/roles/kube_nfs_volumes/README.md
+++ b/roles/kube_nfs_volumes/README.md
@@ -94,7 +94,7 @@ partitions.
* Create an ansible playbook, say `setupnfs.yaml`:
```
- hosts: nfsservers
- sudo: yes
+ become: yes
roles:
- role: kube_nfs_volumes
disks: "/dev/sdb,/dev/sdc"
diff --git a/roles/nuage_master/files/serviceaccount.sh b/roles/nuage_master/files/serviceaccount.sh
deleted file mode 100644
index f6fdb8a8d..000000000
--- a/roles/nuage_master/files/serviceaccount.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-# Parse CLI options
-for i in "$@"; do
- case $i in
- --master-cert-dir=*)
- MASTER_DIR="${i#*=}"
- CA_CERT=${MASTER_DIR}/ca.crt
- CA_KEY=${MASTER_DIR}/ca.key
- CA_SERIAL=${MASTER_DIR}/ca.serial.txt
- ADMIN_FILE=${MASTER_DIR}/admin.kubeconfig
- ;;
- --server=*)
- SERVER="${i#*=}"
- ;;
- --output-cert-dir=*)
- OUTDIR="${i#*=}"
- CONFIG_FILE=${OUTDIR}/nuage.kubeconfig
- ;;
- esac
-done
-
-# If any are missing, print the usage and exit
-if [ -z $SERVER ] || [ -z $OUTDIR ] || [ -z $MASTER_DIR ]; then
- echo "Invalid syntax: $@"
- echo "Usage:"
- echo " $0 --server=<address>:<port> --output-cert-dir=/path/to/output/dir/ --master-cert-dir=/path/to/master/"
- echo "--master-cert-dir: Directory where the master's configuration is held"
- echo "--server: Address of Kubernetes API server (default port is 8443)"
- echo "--output-cert-dir: Directory to put artifacts in"
- echo ""
- echo "All options are required"
- exit 1
-fi
-
-# Login as admin so that we can create the service account
-oc login -u system:admin --config=$ADMIN_FILE || exit 1
-oc project default --config=$ADMIN_FILE
-
-ACCOUNT_CONFIG='
-{
- "apiVersion": "v1",
- "kind": "ServiceAccount",
- "metadata": {
- "name": "nuage"
- }
-}
-'
-
-# Create the account with the included info
-echo $ACCOUNT_CONFIG|oc create --config=$ADMIN_FILE -f -
-
-# Add the cluster-reader role, which allows this service account read access to
-# everything in the cluster except secrets
-oadm policy add-cluster-role-to-user cluster-reader system:serviceaccounts:default:nuage --config=$ADMIN_FILE
-
-# Generate certificates and a kubeconfig for the service account
-oadm create-api-client-config --certificate-authority=${CA_CERT} --client-dir=${OUTDIR} --signer-cert=${CA_CERT} --signer-key=${CA_KEY} --signer-serial=${CA_SERIAL} --user=system:serviceaccounts:default:nuage --master=${SERVER} --public-master=${SERVER} --basename='nuage'
-
-# Verify the finalized kubeconfig
-if ! [ $(oc whoami --config=$CONFIG_FILE) == 'system:serviceaccounts:default:nuage' ]; then
- echo "Service account creation failed!"
- exit 1
-fi
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 5d133cf16..56224cf82 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,6 +1,6 @@
---
- name: restart nuage-openshift-monitor
- sudo: true
+ become: yes
service: name=nuage-openshift-monitor state=restarted
- name: restart master
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index abeee3d71..b8eaede3b 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -1,22 +1,20 @@
---
- name: Create directory /usr/share/nuage-openshift-monitor
- sudo: true
+ become: yes
file: path=/usr/share/nuage-openshift-monitor state=directory
- name: Create the log directory
- sudo: true
+ become: yes
file: path={{ nuage_mon_rest_server_logdir }} state=directory
- name: Install Nuage Openshift Monitor
- sudo: true
+ become: yes
yum: name={{ nuage_openshift_rpm }} state=present
-- name: Run the service account creation script
- sudo: true
- script: serviceaccount.sh --server={{ openshift.master.api_url }} --output-cert-dir={{ cert_output_dir }} --master-cert-dir={{ openshift_master_config_dir }}
+- include: serviceaccount.yml
- name: Download the certs and keys
- sudo: true
+ become: yes
fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
with_items:
- ca.crt
@@ -27,7 +25,7 @@
- include: certificates.yml
- name: Create nuage-openshift-monitor.yaml
- sudo: true
+ become: yes
template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644
notify:
- restart master
diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml
new file mode 100644
index 000000000..5b4af5824
--- /dev/null
+++ b/roles/nuage_master/tasks/serviceaccount.yml
@@ -0,0 +1,51 @@
+---
+- name: Create temporary directory for admin kubeconfig
+ command: mktemp -u /tmp/openshift-ansible-XXXXXXX.kubeconfig
+ register: nuage_tmp_conf_mktemp
+ changed_when: False
+
+- set_fact:
+ nuage_tmp_conf: "{{ nuage_tmp_conf_mktemp.stdout }}"
+
+- name: Copy Configuration to temporary conf
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ nuage_tmp_conf }}
+ changed_when: false
+
+- name: Create Admin Service Account
+ shell: >
+ echo {{ nuage_service_account_config | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ -n default
+ --config={{ nuage_tmp_conf }}
+ -f -
+ register: osnuage_create_service_account
+ failed_when: "'already exists' not in osnuage_create_service_account.stderr and osnuage_create_service_account.rc != 0"
+ changed_when: osnuage_create_service_account.rc == 0
+
+- name: Configure role/user permissions
+ command: >
+ {{ openshift.common.admin_binary }} {{ item }}
+ --config={{ nuage_tmp_conf }}
+ with_items: "{{ nuage_tasks }}"
+ register: osnuage_perm_task
+ failed_when: "'already exists' not in osnuage_perm_task.stderr and osnuage_perm_task.rc != 0"
+ changed_when: osnuage_perm_task.rc == 0
+
+- name: Generate the node client config
+ command: >
+ {{ openshift.common.admin_binary }} create-api-client-config
+ --certificate-authority={{ openshift_master_ca_cert }}
+ --client-dir={{ cert_output_dir }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.api_url }}
+ --signer-cert={{ openshift_master_ca_cert }}
+ --signer-key={{ openshift_master_ca_key }}
+ --signer-serial={{ openshift_master_ca_serial }}
+ --basename='nuage'
+ --user={{ nuage_service_account }}
+
+- name: Clean temporary configuration file
+ command: >
+ rm -f {{ nuage_tmp_conf }}
+ changed_when: false
diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2
index 7228e646b..075de9d9e 100644
--- a/roles/nuage_master/templates/nuage-openshift-monitor.j2
+++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2
@@ -15,6 +15,12 @@ vspVersion: {{ vsp_version }}
enterpriseName: {{ enterprise }}
# Name of the domain in which pods will reside
domainName: {{ domain }}
+# CSP admin user's password
+cspAdminPassword: {{ nuage_master_cspadminpasswd }}
+# Enterprise admin user name
+enterpriseAdminUser: {{ nuage_master_adminusername }}
+# Enterprise admin password
+enterpriseAdminPassword: {{ nuage_master_adminuserpasswd }}
# Location where logs should be saved
log_dir: {{ nuage_mon_rest_server_logdir }}
# Monitor rest server parameters
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index ec4562c77..d3536eb33 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -1,4 +1,7 @@
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
+openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
+openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
cert_output_dir: /usr/share/nuage-openshift-monitor
@@ -15,3 +18,17 @@ nuage_ca_master_rest_server_key: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonSe
nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
nuage_master_crt_dir: /usr/share/nuage-openshift-monitor
+nuage_service_account: system:serviceaccount:default:nuage
+
+nuage_service_account_config:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: nuage
+
+nuage_tasks:
+ - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}
+
+nuage_master_cspadminpasswd: ''
+nuage_master_adminusername: 'admin'
+nuage_master_adminuserpasswd: 'admin'
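
nuage_tasks feeds the Configure role/user permissions loop in serviceaccount.yml, so each list entry becomes one admin-binary invocation against the temporary kubeconfig. With the single entry above, the loop effectively runs (sketch; the tmp path is illustrative):

    # oadm policy add-cluster-role-to-user cluster-reader \
    #     system:serviceaccount:default:nuage --config=/tmp/openshift-ansible-XXXXXXX.kubeconfig
    # 'already exists' in stderr is tolerated, keeping reruns idempotent.
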
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index 25482a845..5f2b97ae2 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,8 +1,8 @@
---
- name: restart vrs
- sudo: true
+ become: yes
service: name=openvswitch state=restarted
- name: restart node
- sudo: true
+ become: yes
service: name={{ openshift.common.service_type }}-node state=restarted
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index d7dd53802..1146573d3 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -1,27 +1,27 @@
---
- name: Install Nuage VRS
- sudo: true
+ become: yes
yum: name={{ vrs_rpm }} state=present
- name: Set the uplink interface
- sudo: true
+ become: yes
lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
- name: Set the Active Controller
- sudo: true
+ become: yes
lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
- name: Set the Standby Controller
- sudo: true
+ become: yes
lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
when: vsc_standby_ip is defined
- name: Install plugin rpm
- sudo: true
+ become: yes
yum: name={{ plugin_rpm }} state=present
- name: Copy the certificates and keys
- sudo: true
+ become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
with_items:
- ca.crt
@@ -32,7 +32,7 @@
- include: certificates.yml
- name: Set the vsp-openshift.yaml
- sudo: true
+ become: yes
template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
notify:
- restart vrs
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index a4d7052a7..86486259f 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -8,7 +8,7 @@ ca_cert: "{{ vsp_openshift_dir }}/ca.crt"
api_server: "{{ openshift_node_master_api_url }}"
nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}"
nuage_mon_rest_server_url: "https://{{ openshift_master_cluster_hostname }}:{{ nuage_mon_rest_server_port }}"
-docker_bridge: "docker0"
+docker_bridge: "{{ nuage_docker_bridge | default('docker0') }}"
rest_client_cert: "{{ vsp_openshift_dir }}/nuageMonClient.crt"
rest_client_key: "{{ vsp_openshift_dir }}/nuageMonClient.key"
rest_server_ca_cert: "{{ vsp_openshift_dir }}/nuageMonCA.crt"
diff --git a/roles/openshift_builddefaults/meta/main.yml b/roles/openshift_builddefaults/meta/main.yml
new file mode 100644
index 000000000..422d08400
--- /dev/null
+++ b/roles/openshift_builddefaults/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Scott Dodson
+ description: OpenShift Build Defaults configuration
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
new file mode 100644
index 000000000..c82aebe72
--- /dev/null
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Set builddefaults
+ openshift_facts:
+ role: builddefaults
+ # TODO: add ability to define builddefaults env vars sort of like this
+ # may need to move the config generation to a filter however.
+ # openshift_env: "{{ hostvars[inventory_hostname]
+ # | oo_merge_dicts(hostvars)
+ # | oo_openshift_env }}"
+ # openshift_env_structures:
+ # - 'openshift.builddefaults.env.*'
+ local_facts:
+ http_proxy: "{{ openshift_builddefaults_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_builddefaults_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_builddefaults_no_proxy | default(None) }}"
+ git_http_proxy: "{{ openshift_builddefaults_git_http_proxy | default(None) }}"
+ git_https_proxy: "{{ openshift_builddefaults_git_https_proxy | default(None) }}"
+
+- name: Set builddefaults config structure
+ openshift_facts:
+ role: builddefaults
+ local_facts:
+ config: "{{ openshift_builddefaults_json | default(builddefaults_yaml) }}"
+
diff --git a/roles/openshift_builddefaults/vars/main.yml b/roles/openshift_builddefaults/vars/main.yml
new file mode 100644
index 000000000..9727c73a5
--- /dev/null
+++ b/roles/openshift_builddefaults/vars/main.yml
@@ -0,0 +1,15 @@
+---
+builddefaults_yaml:
+ BuildDefaults:
+ configuration:
+ apiVersion: v1
+ kind: BuildDefaultsConfig
+ gitHTTPProxy: "{{ openshift.builddefaults.git_http_proxy | default('', true) }}"
+ gitHTTPSProxy: "{{ openshift.builddefaults.git_https_proxy | default('', true) }}"
+ env:
+ - name: HTTP_PROXY
+ value: "{{ openshift.builddefaults.http_proxy | default('', true) }}"
+ - name: HTTPS_PROXY
+ value: "{{ openshift.builddefaults.https_proxy | default('', true) }}"
+ - name: NO_PROXY
+ value: "{{ openshift.builddefaults.no_proxy | default('', true) | join(',') }}"
diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2
index 1b70edc16..8a06b3a08 100644
--- a/roles/openshift_cloud_provider/templates/openstack.conf.j2
+++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2
@@ -11,7 +11,6 @@ tenant-name = {{ openshift.cloudprovider.openstack.tenant_name }}
region = {{ openshift.cloudprovider.openstack.region }}
{% endif %}
{% if 'lb_subnet_id' in openshift.cloudprovider.openstack %}
-+
-+[LoadBalancer]
-+subnet-id = {{ openshift.cloudprovider.openstack.lb_subnet_id }}
-+{% endif %}
+[LoadBalancer]
+subnet-id = {{ openshift.cloudprovider.openstack.lb_subnet_id }}
+{% endif %}
diff --git a/roles/openshift_cluster_metrics/tasks/main.yml b/roles/openshift_cluster_metrics/tasks/main.yml
index d45f62eca..1fc8a074a 100644
--- a/roles/openshift_cluster_metrics/tasks/main.yml
+++ b/roles/openshift_cluster_metrics/tasks/main.yml
@@ -28,7 +28,6 @@
cluster-reader
system:serviceaccount:default:heapster
register: oex_cluster_header_role
- register: oex_cluster_header_role
failed_when: "'already exists' not in oex_cluster_header_role.stderr and oex_cluster_header_role.rc != 0"
changed_when: false
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
index 87306d4a6..2a271854b 100644
--- a/roles/openshift_common/README.md
+++ b/roles/openshift_common/README.md
@@ -20,6 +20,7 @@ Role Variables
| openshift_ip | UNDEF | Internal IP address to use for this host |
| openshift_public_hostname | UNDEF | Public hostname to use for this host |
| openshift_public_ip | UNDEF | Public IP address to use for this host |
+| openshift_portal_net | UNDEF | Service IP CIDR |
Dependencies
------------
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index eda43b9f8..4ec255dbc 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -27,6 +27,7 @@
use_nuage: "{{ openshift_use_nuage | default(None) }}"
use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
data_dir: "{{ openshift_data_dir | default(None) }}"
+ use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}"
# Using oo_image_tag_to_rpm_version here is a workaround for how
# openshift_version is set. That value is computed based on either RPM
diff --git a/roles/openshift_docker/tasks/main.yml b/roles/openshift_docker/tasks/main.yml
index 23613b762..10f47f9b2 100644
--- a/roles/openshift_docker/tasks/main.yml
+++ b/roles/openshift_docker/tasks/main.yml
@@ -4,12 +4,13 @@
# openshift_image_tag correctly for upgrades.
- name: Set version when containerized
command: >
- docker run --rm {{ openshift.common.cli_image }}:latest version
+ docker run --rm {{ openshift.common.cli_image }} version
register: cli_image_version
when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool and openshift_image_tag is not defined
- set_fact:
- l_image_tag: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0] }}"
+ l_image_tag: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2] | join('-') if openshift.common.deployment_type == 'origin' else
+ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0] }}"
when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool and openshift_image_tag is not defined
- set_fact:
@@ -23,6 +24,6 @@
with_items:
- role: docker
local_facts:
- openshift_image_tag: "{{ l_image_tag }}"
- openshift_version: "{{ l_image_tag if l_image_tag is defined else '' | oo_image_tag_to_rpm_version }}"
+ openshift_image_tag: "{{ l_image_tag | default(None) }}"
+ openshift_version: "{{ l_image_tag.split('-')[0] if l_image_tag is defined else '' | oo_image_tag_to_rpm_version }}"
when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool
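
The tag parsing above now keeps origin pre-release suffixes while enterprise tags keep only the first dash-separated field. A worked example with illustrative version strings:

    # origin:     'oc v1.2.0-rc1-13-g2e62fab' -> l_image_tag 'v1.2.0-rc1'
    # enterprise: 'oc v3.2.0.20'              -> l_image_tag 'v3.2.0.20'
    # openshift_version then takes l_image_tag.split('-')[0], e.g. 'v1.2.0'
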
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 89393168b..3acd2bba8 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -27,6 +27,9 @@
docker_log_options: "{{ openshift.docker.log_options | default(omit) }}"
docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub
| default(omit) }}"
+ docker_http_proxy: "{{ openshift.common.http_proxy | default(omit) }}"
+ docker_https_proxy: "{{ openshift.common.https_proxy | default(omit) }}"
+ docker_no_proxy: "{{ openshift.common.no_proxy | default(omit) }}"
- set_fact:
docker_options: >
diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml
index 5e5f96d44..7cc548f69 100644
--- a/roles/openshift_etcd/meta/main.yml
+++ b/roles/openshift_etcd/meta/main.yml
@@ -12,7 +12,7 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_facts
+- role: openshift_etcd_facts
- role: openshift_docker
when: openshift.common.is_containerized | bool
- role: etcd
diff --git a/roles/openshift_etcd_certificates/meta/main.yml b/roles/openshift_etcd_certificates/meta/main.yml
new file mode 100644
index 000000000..2725fdb51
--- /dev/null
+++ b/roles/openshift_etcd_certificates/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Andrew Butcher
+ description: OpenShift etcd Certificates
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_etcd_facts
+- role: etcd_certificates
diff --git a/roles/openshift_etcd_facts/meta/main.yml b/roles/openshift_etcd_facts/meta/main.yml
new file mode 100644
index 000000000..925aa9f92
--- /dev/null
+++ b/roles/openshift_etcd_facts/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Andrew Butcher
+ description: OpenShift etcd Facts
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
new file mode 100644
index 000000000..6f3894565
--- /dev/null
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -0,0 +1,5 @@
+---
+etcd_is_containerized: "{{ openshift.common.is_containerized }}"
+etcd_is_atomic: "{{ openshift.common.is_atomic }}"
+etcd_hostname: "{{ openshift.common.hostname }}"
+etcd_ip: "{{ openshift.common.ip }}"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index af388f6cf..7d81ac927 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.3.0
+XPAAS_VERSION=ose-v1.3.0-1
ORIGIN_VERSION=${1:-v1.2}
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
@@ -40,7 +40,7 @@ find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secre
wget https://raw.githubusercontent.com/jboss-fuse/application-templates/master/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml
-cp ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-*.yaml ${EXAMPLES_BASE}/infrastructure-templates/enterprise/
+wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/metrics-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
index 672eaaa09..70c906f8e 100644
--- a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
@@ -5,7 +5,7 @@
"name": "mongodb-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported",
+ "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb"
}
@@ -232,7 +232,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
index d94262dde..e39ee57c8 100644
--- a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
@@ -5,7 +5,7 @@
"name": "mysql-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported",
+ "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql"
}
@@ -221,7 +221,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
index 5713411ad..347e01de3 100644
--- a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
@@ -5,7 +5,7 @@
"name": "postgresql-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported",
+ "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql"
}
@@ -220,7 +220,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
index c6cc98ce3..848e93c5f 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
@@ -69,6 +69,8 @@ objects:
value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- name: ES_OPS_RECOVER_AFTER_TIME
value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: MODE
+ value: ${MODE}
dnsPolicy: ClusterFirst
restartPolicy: Never
serviceAccount: logging-deployer
@@ -80,11 +82,11 @@ objects:
secretName: logging-deployer
parameters:
-
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ description: 'Specify image prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployment:3.2.0", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
+ value: registry.access.redhat.com/openshift3/
-
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployment:3.2.0", set version "3.2.0"'
name: IMAGE_VERSION
value: "3.2.0"
-
@@ -148,4 +150,7 @@ parameters:
description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
name: ES_OPS_RECOVER_AFTER_TIME
value: "5m"
-
+-
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
index 9257b1f28..cc33f77d8 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
@@ -1,156 +1,290 @@
apiVersion: "v1"
-kind: "Template"
-metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
- tags: "infrastructure"
-labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
-objects:
+kind: "List"
+items:
-
- apiVersion: v1
- kind: Pod
+ apiVersion: "v1"
+ kind: "Template"
metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: logging-deployer
-parameters:
--
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
--
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
--
- description: "External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- required: true
--
- description: "External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
--
- description: "External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- required: true
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
--
- description: "Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
--
- description: "How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- required: true
--
- description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
+ name: logging-deployer-account-template
+ annotations:
+ description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
+ tags: "infrastructure"
+ objects:
+ - apiVersion: v1
+ kind: ServiceAccount
+ name: logging-deployer
+ metadata:
+ name: logging-deployer
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ secrets:
+ - name: logging-deployer
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-kibana
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-elasticsearch
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-fluentd
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-curator
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: oauth-editor
+ rules:
+ - resources:
+ - oauthclients
+ verbs:
+ - create
+ - delete
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: daemonset-admin
+ rules:
+ - resources:
+ - daemonsets
+ apiGroups:
+ - extensions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - delete
+ - update
-
- description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
--
- description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
--
- description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
--
- description: "Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
--
- description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
--
- description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
--
- description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
--
- description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
--
- description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
--
- description: "The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ objects:
+ -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_PVC_SIZE
+ value: ${ES_PVC_SIZE}
+ - name: ES_PVC_PREFIX
+ value: ${ES_PVC_PREFIX}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_PVC_SIZE
+ value: ${ES_OPS_PVC_SIZE}
+ - name: ES_OPS_PVC_PREFIX
+ value: ${ES_OPS_PVC_PREFIX}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: FLUENTD_NODESELECTOR
+ value: ${FLUENTD_NODESELECTOR}
+ - name: ES_NODESELECTOR
+ value: ${ES_NODESELECTOR}
+ - name: ES_OPS_NODESELECTOR
+ value: ${ES_OPS_NODESELECTOR}
+ - name: KIBANA_NODESELECTOR
+ value: ${KIBANA_NODESELECTOR}
+ - name: KIBANA_OPS_NODESELECTOR
+ value: ${KIBANA_OPS_NODESELECTOR}
+ - name: CURATOR_NODESELECTOR
+ value: ${CURATOR_NODESELECTOR}
+ - name: CURATOR_OPS_NODESELECTOR
+ value: ${CURATOR_OPS_NODESELECTOR}
+ - name: MODE
+ value: ${MODE}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+ parameters:
+ -
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "docker.io/openshift/origin-"
+ -
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "latest"
+ -
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+ -
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+ -
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+ -
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+ -
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+ -
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+ -
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_PVC_SIZE
+ -
+ description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
+ name: ES_PVC_PREFIX
+ value: "logging-es-"
+ -
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+ -
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+ -
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+ -
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+ -
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_OPS_PVC_SIZE
+ -
+ description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
+ name: ES_OPS_PVC_PREFIX
+ value: "logging-es-ops-"
+ -
+ description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+ -
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+ -
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+ -
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "The nodeSelector used for the Fluentd DaemonSet."
+ name: FLUENTD_NODESELECTOR
+ value: "logging-infra-fluentd=true"
+ -
+ description: "Node selector Elasticsearch cluster (label=value)."
+ name: ES_NODESELECTOR
+ value: ""
+ -
+ description: "Node selector Elasticsearch operations cluster (label=value)."
+ name: ES_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "Node selector Kibana cluster (label=value)."
+ name: KIBANA_NODESELECTOR
+ value: ""
+ -
+ description: "Node selector Kibana operations cluster (label=value)."
+ name: KIBANA_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "Node selector Curator (label=value)."
+ name: CURATOR_NODESELECTOR
+ value: ""
+ -
+ description: "Node selector operations Curator (label=value)."
+ name: CURATOR_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
index 30d79acee..c620c46ec 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
@@ -54,8 +54,12 @@ objects:
value: ${IMAGE_VERSION}
- name: MASTER_URL
value: ${MASTER_URL}
+ - name: MODE
+ value: ${MODE}
- name: REDEPLOY
value: ${REDEPLOY}
+ - name: IGNORE_PREFLIGHT
+ value: ${IGNORE_PREFLIGHT}
- name: USE_PERSISTENT_STORAGE
value: ${USE_PERSISTENT_STORAGE}
- name: HAWKULAR_METRICS_HOSTNAME
@@ -66,6 +70,10 @@ objects:
value: ${CASSANDRA_PV_SIZE}
- name: METRIC_DURATION
value: ${METRIC_DURATION}
+ - name: HEAPSTER_NODE_ID
+ value: ${HEAPSTER_NODE_ID}
+ - name: METRIC_RESOLUTION
+ value: ${METRIC_RESOLUTION}
dnsPolicy: ClusterFirst
restartPolicy: Never
serviceAccount: metrics-deployer
@@ -83,7 +91,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "latest"
+ value: "v0.1.0"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
@@ -93,10 +101,18 @@ parameters:
name: HAWKULAR_METRICS_HOSTNAME
required: true
-
- description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
+ name: MODE
+ value: "deploy"
+-
+ description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
name: REDEPLOY
value: "false"
-
+ description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
+ name: IGNORE_PREFLIGHT
+ value: "false"
+-
description: "Set to true for persistent storage, set to false to use non persistent storage"
name: USE_PERSISTENT_STORAGE
value: "true"
@@ -112,3 +128,11 @@ parameters:
description: "How many days metrics should be stored for."
name: METRIC_DURATION
value: "7"
+-
+ description: "The identifier used when generating metric ids in Hawkular"
+ name: HEAPSTER_NODE_ID
+ value: "nodename"
+-
+ description: "How often metrics should be gathered. Defaults value of '10s' for 10 seconds"
+ name: METRIC_RESOLUTION
+ value: "10s"
diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
index 6c143fc70..0d8dcffa1 100644
--- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
@@ -5,7 +5,7 @@
"name": "jenkins-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "Jenkins service, with persistent storage.",
+ "description": "Jenkins service, with persistent storage. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins"
}
@@ -207,7 +207,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json
index 3298ef40c..661bcbb69 100644
--- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json
@@ -112,7 +112,10 @@
"secret": "${GENERIC_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
}
},
{
diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json
index 82df67c4e..0518dfac7 100644
--- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json
+++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json
@@ -112,7 +112,10 @@
"secret": "${GENERIC_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
}
},
{
diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md
index aed4ec871..c9c7b378c 100644
--- a/roles/openshift_expand_partition/README.md
+++ b/roles/openshift_expand_partition/README.md
@@ -45,7 +45,7 @@ space on /dev/xvda, and the file system will be expanded to fill the new
partition space.
- hosts: mynodes
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
@@ -68,7 +68,7 @@ partition space.
* Create an ansible playbook, say `expandvar.yaml`:
```
- hosts: mynodes
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 9218e12ae..643984982 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -56,12 +56,54 @@ def migrate_docker_facts(facts):
if 'node' in facts and 'portal_net' in facts['node']:
facts['docker']['hosted_registry_insecure'] = True
facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
+
+ # log_options was originally meant to be a comma-separated string, but
+ # we now prefer an actual list, with backward compatibility:
+ if 'log_options' in facts['docker'] and \
+ isinstance(facts['docker']['log_options'], basestring):
+ facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
+
+ return facts
+
+# TODO: We should add a generic migration function that takes source and destination
+# paths and does the right thing rather than one function for common, one for node, etc.
+def migrate_common_facts(facts):
+ """ Migrate facts from various roles into common """
+ params = {
+ 'node': ('portal_net',),  # trailing comma: a one-element tuple, not a string
+ 'master': ('portal_net',)
+ }
+ if 'common' not in facts:
+ facts['common'] = {}
+ for role in params.keys():
+ if role in facts:
+ for param in params[role]:
+ if param in facts[role]:
+ facts['common'][param] = facts[role].pop(param)
+ return facts
+
+def migrate_node_facts(facts):
+ """ Migrate facts from various roles into node """
+ params = {
+ 'common': ('dns_ip',),  # trailing comma: a one-element tuple, not a string
+ }
+ if 'node' not in facts:
+ facts['node'] = {}
+ for role in params.keys():
+ if role in facts:
+ for param in params[role]:
+ if param in facts[role]:
+ facts['node'][param] = facts[role].pop(param)
return facts
def migrate_local_facts(facts):
""" Apply migrations of local facts """
migrated_facts = copy.deepcopy(facts)
- return migrate_docker_facts(migrated_facts)
+ migrated_facts = migrate_docker_facts(migrated_facts)
+ migrated_facts = migrate_common_facts(migrated_facts)
+ migrated_facts = migrate_node_facts(migrated_facts)
+ migrated_facts = migrate_hosted_facts(migrated_facts)
+ return migrated_facts
def migrate_hosted_facts(facts):
""" Apply migrations for master facts """
@@ -448,6 +490,27 @@ def set_metrics_facts_if_unset(facts):
facts['common']['use_cluster_metrics'] = use_cluster_metrics
return facts
+def set_dnsmasq_facts_if_unset(facts):
+ """ Set dnsmasq facts if not already present in facts
+ Args:
+ facts (dict) existing facts
+ Returns:
+ facts (dict) updated facts with values set if not previously set
+ """
+
+ if 'common' in facts and 'use_dnsmasq' not in facts['common']:
+ # default only; an explicitly configured use_dnsmasq is left alone
+ facts['common']['use_dnsmasq'] = safe_get_bool(facts['common']['version_gte_3_2_or_1_2'])
+ if 'master' in facts and 'dns_port' not in facts['master']:
+ if facts['common']['use_dnsmasq']:
+ facts['master']['dns_port'] = 8053
+ else:
+ facts['master']['dns_port'] = 53
+
+ return facts
+
def set_project_cfg_facts_if_unset(facts):
""" Set Project Configuration facts if not already present in facts dict
dict:
@@ -586,11 +649,13 @@ def set_aggregate_facts(facts):
"""
all_hostnames = set()
internal_hostnames = set()
+ kube_svc_ip = first_ip(facts['common']['portal_net'])
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
all_hostnames.add(facts['common']['ip'])
all_hostnames.add(facts['common']['public_ip'])
+ facts['common']['kube_svc_ip'] = kube_svc_ip
internal_hostnames.add(facts['common']['hostname'])
internal_hostnames.add(facts['common']['ip'])
@@ -607,9 +672,8 @@ def set_aggregate_facts(facts):
'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
all_hostnames.update(svc_names)
internal_hostnames.update(svc_names)
- first_svc_ip = first_ip(facts['master']['portal_net'])
- all_hostnames.add(first_svc_ip)
- internal_hostnames.add(first_svc_ip)
+ all_hostnames.add(kube_svc_ip)
+ internal_hostnames.add(kube_svc_ip)
facts['common']['all_hostnames'] = list(all_hostnames)
facts['common']['internal_hostnames'] = list(internal_hostnames)
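first_ip is defined earlier in openshift_facts.py, outside this hunk; a sketch of the computation it performs on the services subnet, assuming the conventional first-usable-address behavior:

    import socket
    import struct

    def first_ip_sketch(network_cidr):
        # '172.30.0.0/16' -> '172.30.0.1' (illustrative only)
        address = network_cidr.split('/')[0]
        as_int = struct.unpack('!I', socket.inet_aton(address))[0]
        return socket.inet_ntoa(struct.pack('!I', as_int + 1))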
@@ -1154,7 +1218,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if key in inventory_json_facts:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if isinstance(new[key], str):
+ if isinstance(new[key], basestring):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1212,7 +1276,12 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
facts[key] = copy.deepcopy(value)
new_keys = set(new.keys()) - set(orig.keys())
for key in new_keys:
- facts[key] = copy.deepcopy(new[key])
+ # Watch out for JSON facts that sometimes load as strings.
+ # (can happen if the JSON contains a boolean)
+ if key in inventory_json_facts and isinstance(new[key], basestring):
+ facts[key] = yaml.safe_load(new[key])
+ else:
+ facts[key] = copy.deepcopy(new[key])
return facts
def save_local_facts(filename, facts):
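A short illustration of the string-typed JSON fact problem the hunk above handles (the fact name and values here are hypothetical):

    import yaml

    # An inventory var like openshift_node_labels='{"region": "infra"}'
    # arrives as a plain string; safe_load restores the dict before merging.
    raw = '{"region": "infra", "zone": "default"}'
    parsed = yaml.safe_load(raw) if isinstance(raw, str) else raw
    assert parsed == {'region': 'infra', 'zone': 'default'}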
@@ -1263,6 +1332,23 @@ def get_local_facts_from_file(filename):
return local_facts
+def sort_unique(alist):
+ """ Sorts and de-dupes a list
+
+ Args:
+ alist (list): the list to sort and de-dupe
+ Returns:
+ list: a sorted, de-duped list
+ """
+
+ alist.sort()
+ out = list()
+ for i in alist:
+ if i not in out:
+ out.append(i)
+
+ return out
+
def safe_get_bool(fact):
""" Get a boolean fact safely.
@@ -1273,6 +1359,61 @@ def safe_get_bool(fact):
"""
return bool(strtobool(str(fact)))
+def set_proxy_facts(facts):
+ """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
+ no_proxy to the more specific builddefaults and builddefaults_git vars.
+ 1. http_proxy, https_proxy, no_proxy
+ 2. builddefaults_*
+ 3. builddefaults_git_*
+
+ Args:
+ facts(dict): existing facts
+ Returns:
+ facts(dict): Updated facts with missing values
+ """
+ if 'common' in facts:
+ common = facts['common']
+ if 'http_proxy' in common or 'https_proxy' in common:
+ if 'generate_no_proxy_hosts' in common and \
+ safe_get_bool(common['generate_no_proxy_hosts']):
+ if 'no_proxy' in common and \
+ isinstance(common['no_proxy'], basestring):
+ common['no_proxy'] = common['no_proxy'].split(",")
+ elif 'no_proxy' not in common:
+ # an existing no_proxy list is left untouched
+ common['no_proxy'] = []
+ if 'no_proxy_internal_hostnames' in common:
+ common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
+ common['no_proxy'].append('.' + common['dns_domain'])
+ common['no_proxy'].append(common['hostname'])
+ common['no_proxy'] = sort_unique(common['no_proxy'])
+ facts['common'] = common
+
+ if 'builddefaults' in facts:
+ builddefaults = facts['builddefaults']
+ common = facts['common']
+ # Copy values from common to builddefaults
+ if 'http_proxy' not in builddefaults and 'http_proxy' in common:
+ builddefaults['http_proxy'] = common['http_proxy']
+ if 'https_proxy' not in builddefaults and 'https_proxy' in common:
+ builddefaults['https_proxy'] = common['https_proxy']
+ if 'no_proxy' not in builddefaults and 'no_proxy' in common:
+ builddefaults['no_proxy'] = common['no_proxy']
+ if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
+ builddefaults['git_http_proxy'] = builddefaults['http_proxy']
+ if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
+ builddefaults['git_https_proxy'] = builddefaults['https_proxy']
+ # If we're actually defining a proxy config then create kube_admission_plugin_config
+ # if it doesn't exist, then merge builddefaults[config] structure
+ # into kube_admission_plugin_config
+ if 'kube_admission_plugin_config' not in facts['master']:
+ facts['master']['kube_admission_plugin_config'] = dict()
+ if 'config' in builddefaults and ('http_proxy' in builddefaults or \
+ 'https_proxy' in builddefaults):
+ facts['master']['kube_admission_plugin_config'].update(builddefaults['config'])
+ facts['builddefaults'] = builddefaults
+
+ return facts
+
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
""" Set containerized facts.
@@ -1406,7 +1547,8 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['cloudprovider',
+ known_roles = ['builddefaults',
+ 'cloudprovider',
'common',
'docker',
'etcd',
@@ -1490,9 +1632,11 @@ class OpenShiftFacts(object):
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
+ facts = set_dnsmasq_facts_if_unset(facts)
facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
+ facts = set_proxy_facts(facts)
if not safe_get_bool(facts['common']['is_containerized']):
facts = set_installed_variant_rpm_facts(facts)
return dict(openshift=facts)
@@ -1519,6 +1663,7 @@ class OpenShiftFacts(object):
deployment_type=deployment_type,
hostname=hostname,
public_hostname=hostname,
+ portal_net='172.30.0.0/16',
client_binary='oc', admin_binary='oadm',
dns_domain='cluster.local',
install_examples=True,
@@ -1546,7 +1691,7 @@ class OpenShiftFacts(object):
etcd_hosts='', etcd_port='4001',
portal_net='172.30.0.0/16',
embedded_etcd=True, embedded_kube=True,
- embedded_dns=True, dns_port='53',
+ embedded_dns=True,
bind_addr='0.0.0.0',
session_max_seconds=3600,
session_name='ssn',
@@ -1555,7 +1700,8 @@ class OpenShiftFacts(object):
auth_token_max_seconds=500,
oauth_grant_method='auto',
scheduler_predicates=scheduler_predicates,
- scheduler_priorities=scheduler_priorities)
+ scheduler_priorities=scheduler_priorities,
+ dynamic_provisioning_enabled=True)
if 'node' in roles:
defaults['node'] = dict(labels={}, annotations={},
@@ -1576,6 +1722,24 @@ class OpenShiftFacts(object):
if 'hosted' in roles or self.role == 'hosted':
defaults['hosted'] = dict(
+ metrics=dict(
+ deploy=False,
+ duration=7,
+ resolution=10,
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='metrics',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'),
+ host=None,
+ access_modes=['ReadWriteMany'],
+ create_pv=True
+ )
+ ),
registry=dict(
storage=dict(
kind=None,
@@ -1777,15 +1941,12 @@ class OpenShiftFacts(object):
if isinstance(val, basestring):
val = [x.strip() for x in val.split(',')]
new_local_facts['docker'][key] = list(set(val) - set(['']))
+ # Convert a legacy comma-separated log_options string to a list if present:
+ if 'log_options' in new_local_facts['docker'] and \
+ isinstance(new_local_facts['docker']['log_options'], basestring):
+ new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
- for facts in new_local_facts.values():
- keys_to_delete = []
- if isinstance(facts, dict):
- for fact, value in facts.iteritems():
- if value == "" or value is None:
- keys_to_delete.append(fact)
- for key in keys_to_delete:
- del facts[key]
+ new_local_facts = self.remove_empty_facts(new_local_facts)
if new_local_facts != local_facts:
self.validate_local_facts(new_local_facts)
@@ -1796,6 +1957,23 @@ class OpenShiftFacts(object):
self.changed = changed
return new_local_facts
+ def remove_empty_facts(self, facts=None):
+ """ Remove empty facts
+
+ Args:
+ facts (dict): facts to clean
+ Returns:
+ dict: the facts dict with empty leaf values removed
+ """
+ facts_to_remove = []
+ for fact, value in facts.iteritems():
+ if isinstance(facts[fact], dict):
+ facts[fact] = self.remove_empty_facts(facts[fact])
+ else:
+ if value == "" or value == [""] or value is None:
+ facts_to_remove.append(fact)
+ for fact in facts_to_remove:
+ del facts[fact]
+ return facts
+
def validate_local_facts(self, facts=None):
""" Validate local facts
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 36def57c8..ff726ae24 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -5,15 +5,13 @@
when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
- name: Detecting Operating System
- shell: ls /run/ostree-booted
- ignore_errors: yes
- failed_when: false
- changed_when: false
- register: ostree_output
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
# Locally setup containerized facts for now
- set_fact:
- l_is_atomic: "{{ ostree_output.rc == 0 }}"
+ l_is_atomic: "{{ ostree_booted.stat.exists }}"
- set_fact:
l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
@@ -33,3 +31,19 @@
is_containerized: "{{ l_is_containerized | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+
+# This must run as a separate task because the hostname fact is not yet set in the task above.
+- name: Gather hostnames for proxy configuration
+ openshift_facts:
+ role: common
+ local_facts:
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+ no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index 6a36f74b2..4ccbf4430 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -32,6 +32,7 @@
{{ openshift.common.client_binary }} --api-version='v1' -o json
get nodes -n default --config={{ openshift.common.config_base }}/master/admin.kubeconfig
register: openshift_hosted_router_nodes_json
+ changed_when: false
when: openshift.hosted.router.replicas | default(None) == None
- name: Collect nodes matching router selector
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index d8a5b62a0..072f7bb4e 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -15,43 +15,52 @@
changed_when: False
- name: "Create logging project"
- command: {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
- name: "Changing projects"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
- name: "Creating logging deployer secret"
- command: " {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
register: secret_output
failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
- name: "Copy serviceAccount file"
- copy: dest=/tmp/logging-deployer-sa.yaml
- src={{role_path}}/files/logging-deployer-sa.yaml
- force=yes
+ copy:
+ dest: /tmp/logging-deployer-sa.yaml
+ src: "{{role_path}}/files/logging-deployer-sa.yaml"
+ force: yes
- name: "Create logging-deployer service account"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /tmp/logging-deployer-sa.yaml"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /tmp/logging-deployer-sa.yaml
register: deployer_output
failed_when: "deployer_output.rc == 1 and 'exists' not in deployer_output.stderr"
- name: "Set permissions for logging-deployer service account"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-role-to-user edit system:serviceaccount:logging:logging-deployer"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-role-to-user edit system:serviceaccount:logging:logging-deployer
register: permiss_output
failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
- name: "Set permissions for fluentd"
- command: {{ openshift.common.admin_binary}} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
+ command: >
+ {{ openshift.common.admin_binary}} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd_output
failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
- name: "Set additional permissions for fluentd"
- command: {{ openshift.common.admin_binary}} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
+ command: >
+ {{ openshift.common.admin_binary}} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd2_output
failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
- name: "Create deployer template"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml -n openshift"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml -n openshift
register: template_output
failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr"
@@ -90,13 +99,12 @@
- name: "Scale fluentd deployment config"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale dc/logging-fluentd --replicas={{ fluentd_replicas | default('1') }}"
-
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale dc/logging-fluentd --replicas={{ fluentd_replicas | default('1') }}
- - name: "Scale fluentd replication controller"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale rc/logging-fluentd-1 --replicas={{ fluentd_replicas | default('1') }}"
- - debug: msg="Logging components deployed. Note persistant volume for elasticsearch must be setup manually"
+ - debug:
+ msg: "Logging components deployed. Note persistant volume for elasticsearch must be setup manually"
- name: Delete temp directory
file:
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index cee1f1738..291cdbbb5 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -6,7 +6,7 @@
retries: 50
delay: 5
changed_when: false
- with_items: openshift_nodes
+ with_items: "{{ openshift_nodes }}"
- name: Set node schedulability
command: >
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 6bf28ff2b..09bde6002 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -9,10 +9,10 @@ os_firewall_allow:
port: "{{ openshift.master.api_port }}/tcp"
- service: api controllers https
port: "{{ openshift.master.controllers_port }}/tcp"
-- service: dns tcp
- port: 53/tcp
-- service: dns udp
- port: 53/udp
+- service: skydns tcp
+ port: "{{ openshift.master.dns_port }}/tcp"
+- service: skydns udp
+ port: "{{ openshift.master.dns_port }}/udp"
- service: Fluentd td-agent tcp
port: 24224/tcp
- service: Fluentd td-agent udp
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 0d4241e2c..e882e0b8b 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -15,5 +15,6 @@ dependencies:
- role: openshift_docker
- role: openshift_cli
- role: openshift_cloud_provider
+- role: openshift_builddefaults
- role: openshift_master_facts
- role: openshift_hosted_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 18a42bf93..fe0784ea2 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -78,23 +78,50 @@
action: "{{ ansible_pkg_mgr }} name=httpd-tools state=present"
when: (item.kind == 'HTPasswdPasswordIdentityProvider') and
not openshift.common.is_atomic | bool
- with_items: openshift.master.identity_providers
+ with_items: "{{ openshift.master.identity_providers }}"
- name: Ensure htpasswd directory exists
file:
path: "{{ item.filename | dirname }}"
state: directory
when: item.kind == 'HTPasswdPasswordIdentityProvider'
- with_items: openshift.master.identity_providers
+ with_items: "{{ openshift.master.identity_providers }}"
- name: Create the htpasswd file if needed
- copy:
+ template:
dest: "{{ item.filename }}"
- content: ""
+ src: htpasswd.j2
mode: 0600
- force: no
+ backup: yes
when: item.kind == 'HTPasswdPasswordIdentityProvider'
- with_items: openshift.master.identity_providers
+ with_items: "{{ openshift.master.identity_providers }}"
+
+- name: Create the ldap ca file if needed
+ copy:
+ dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('ldap_ca.crt') }}"
+ content: "{{ openshift.master.ldap_ca }}"
+ mode: 0600
+ backup: yes
+ when: openshift.master.ldap_ca is defined and item.kind == 'LDAPPasswordIdentityProvider'
+ with_items: "{{ openshift.master.identity_providers }}"
+
+- name: Create the openid ca file if needed
+ copy:
+ dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('openid_ca.crt') }}"
+ content: "{{ openshift.master.openid_ca }}"
+ mode: 0600
+ backup: yes
+ when: openshift.master.openid_ca is defined and item.kind == 'OpenIDIdentityProvider' and item.ca | default('') != ''
+ with_items: "{{ openshift.master.identity_providers }}"
+
+- name: Create the request header ca file if needed
+ copy:
+ dest: "{{ item.clientCA if 'clientCA' in item and '/' in item.clientCA else openshift_master_config_dir ~ '/' ~ item.clientCA | default('request_header_ca.crt') }}"
+ content: "{{ openshift.master.request_header_ca }}"
+ mode: 0600
+ backup: yes
+ when: openshift.master.request_header_ca is defined and item.kind == 'RequestHeaderIdentityProvider' and item.clientCA | default('') != ''
+ with_items: "{{ openshift.master.identity_providers }}"
- name: Install the systemd units
include: systemd_units.yml
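The dest expressions in the CA tasks above keep a caller-supplied path as-is and otherwise drop the file into the master config dir; in Python terms (sketch, helper name hypothetical):

    def ca_dest(item, config_dir, default_name):
        # A value containing '/' is treated as a full path; a bare filename
        # (or no value at all) lands in the master config directory.
        if 'ca' in item and '/' in item['ca']:
            return item['ca']
        return '%s/%s' % (config_dir, item.get('ca', default_name))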
@@ -212,7 +239,7 @@
mode: 0700
owner: "{{ item }}"
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
- with_items: client_users
+ with_items: "{{ client_users }}"
# TODO: Update this file if the contents of the source file are not present in
# the dest file, will need to make sure to ignore things that could be added
@@ -220,7 +247,7 @@
command: cp {{ openshift_master_config_dir }}/admin.kubeconfig ~{{ item }}/.kube/config
args:
creates: ~{{ item }}/.kube/config
- with_items: client_users
+ with_items: "{{ client_users }}"
- name: Update the permissions on the admin client config(s)
file:
@@ -229,4 +256,4 @@
mode: 0700
owner: "{{ item }}"
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
- with_items: client_users
+ with_items: "{{ client_users }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 862cfa8f1..4cf632841 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -10,8 +10,13 @@ AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
# Proxy configuration
-# Origin uses standard HTTP_PROXY environment variables. Be sure to set
-# NO_PROXY for your master
-#NO_PROXY=master.example.com
-#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
-#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY='{{ openshift.common.http_proxy | default('') }}'
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY='{{ openshift.common.https_proxy | default('')}}'
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY='{{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.common.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}'
+{% endif %}
diff --git a/roles/openshift_master/templates/htpasswd.j2 b/roles/openshift_master/templates/htpasswd.j2
new file mode 100644
index 000000000..ba2c02e20
--- /dev/null
+++ b/roles/openshift_master/templates/htpasswd.j2
@@ -0,0 +1,5 @@
+{% if 'htpasswd_users' in openshift.master %}
+{% for user,pass in openshift.master.htpasswd_users.iteritems() %}
+{{ user ~ ':' ~ pass }}
+{% endfor %}
+{% endif %}
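Given a hypothetical htpasswd_users fact, the template renders one "user:hash" line per entry; in Python terms:

    htpasswd_users = {'jdoe': '$apr1$examplehash'}  # hypothetical values
    content = '\n'.join('%s:%s' % (u, p)
                        for u, p in sorted(htpasswd_users.items()))
    # content == 'jdoe:$apr1$examplehash'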
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 1009aa318..48bb8a13f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -123,7 +123,7 @@ kubernetesMasterConfig:
keyFile: master.proxy-client.key
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
servicesNodePortRange: ""
- servicesSubnet: {{ openshift.master.portal_net }}
+ servicesSubnet: {{ openshift.common.portal_net }}
staticNodeNames: {{ openshift_node_ips | default([], true) }}
{% endif %}
masterClients:
@@ -138,7 +138,7 @@ networkConfig:
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
- serviceNetworkCIDR: {{ openshift.master.portal_net }}
+ serviceNetworkCIDR: {{ openshift.common.portal_net }}
oauthConfig:
{% if 'oauth_always_show_provider_selection' in openshift.master %}
alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
@@ -209,3 +209,5 @@ servingInfo:
{% endfor %}
{% endfor %}
{% endif %}
+volumeConfig:
+ dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index 69754ee10..01a8428a0 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -10,8 +10,13 @@ AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
# Proxy configuration
-# Origin uses standard HTTP_PROXY environment variables. Be sure to set
-# NO_PROXY for your master
-#NO_PROXY=master.example.com
-#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
-#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY='{{ openshift.common.http_proxy | default('') }}'
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY='{{ openshift.common.https_proxy | default('')}}'
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY='{{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.common.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}'
+{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 048a4305a..89ccb1eed 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -10,8 +10,13 @@ AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
# Proxy configuration
-# Origin uses standard HTTP_PROXY environment variables. Be sure to set
-# NO_PROXY for your master
-#NO_PROXY=master.example.com
-#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
-#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY='{{ openshift.common.http_proxy | default('') }}'
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY='{{ openshift.common.https_proxy | default('')}}'
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY='{{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.common.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}'
+{% endif %}
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 9017b7d2b..394f9d381 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -4,14 +4,14 @@
path: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}"
state: directory
mode: 0700
- with_items: masters_needing_certs
+ with_items: "{{ masters_needing_certs | default([]) }}"
- file:
src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
state: hard
with_nested:
- - masters_needing_certs
+ - "{{ masters_needing_certs | default([]) }}"
-
- ca.crt
- ca.key
@@ -26,7 +26,7 @@
--cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}
--overwrite=false
when: item.master_certs_missing | bool
- with_items: masters_needing_certs
+ with_items: "{{ masters_needing_certs | default([]) }}"
- file:
src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
@@ -34,5 +34,5 @@
state: hard
force: true
with_nested:
- - masters_needing_certs
+ - "{{ masters_needing_certs | default([]) }}"
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}"
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 4d7c04065..0cbbaffc2 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -22,7 +22,6 @@
extension_scripts: "{{ openshift_master_extension_scripts | default(None) }}"
extension_stylesheets: "{{ openshift_master_extension_stylesheets | default(None) }}"
extensions: "{{ openshift_master_extensions | default(None) }}"
- oauth_template: "{{ openshift_master_oauth_template | default(None) }}"
etcd_hosts: "{{ openshift_master_etcd_hosts | default(None) }}"
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
@@ -30,10 +29,10 @@
embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
+ # defaults to 8053 when using dnsmasq in 1.2/3.2
dns_port: "{{ openshift_master_dns_port | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
pod_eviction_timeout: "{{ openshift_master_pod_eviction_timeout | default(None) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
session_name: "{{ openshift_master_session_name | default(None) }}"
session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
@@ -42,11 +41,15 @@
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+ htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}"
+ ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
+ openid_ca: "{{ openshift_master_openid_ca | default(lookup('file', openshift_master_openid_ca_file) if openshift_master_openid_ca_file is defined else None) }}"
+ request_header_ca: "{{ openshift_master_request_header_ca | default(lookup('file', openshift_master_request_header_ca_file) if openshift_master_request_header_ca_file is defined else None) }}"
registry_url: "{{ oreg_url | default(None) }}"
oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
- default_subdomain: "{{ openshift_master_default_subdomain | default(osm_default_subdomain) | default(None) }}"
+ default_subdomain: "{{ openshift_master_default_subdomain | default(osm_default_subdomain | default(None), true) }}"
custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}"
default_node_selector: "{{ osm_default_node_selector | default(None) }}"
project_request_message: "{{ osm_project_request_message | default(None) }}"
@@ -54,7 +57,6 @@
mcs_allocator_range: "{{ osm_mcs_allocator_range | default(None) }}"
mcs_labels_per_project: "{{ osm_mcs_labels_per_project | default(None) }}"
uid_allocator_range: "{{ osm_uid_allocator_range | default(None) }}"
- router_selector: "{{ openshift_router_selector | default(None) }}"
registry_selector: "{{ openshift_registry_selector | default(None) }}"
api_server_args: "{{ osm_api_server_args | default(None) }}"
controller_args: "{{ osm_controller_args | default(None) }}"
@@ -73,3 +75,4 @@
oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}"
+ dynamic_provisioning_enabled: "{{ openshift_master_dynamic_provisioning_enabled | default(None) }}"
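The new htpasswd_users and *_ca facts let a file on the Ansible control host stand in for an inline value. A sketch of the two hypothetical ways an inventory (for example, via group_vars) could now provide htpasswd users:

```
# Option 1: inline map of user name to password hash (values are examples)
openshift_master_htpasswd_users:
  alice: '$apr1$examplehash1'
  bob: '$apr1$examplehash2'

# Option 2: read a local htpasswd file instead; the fact above feeds it
# through the oo_htpasswd_users_from_file filter when this is defined.
openshift_master_htpasswd_file: /path/to/users.htpasswd
```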
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
new file mode 100644
index 000000000..3b0ee2761
--- /dev/null
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -0,0 +1,14 @@
+builddefaults_yaml:
+ BuildDefaults:
+ configuration:
+ apiVersion: v1
+ kind: BuildDefaultsConfig
+ gitHTTPProxy: "{{ openshift.master.builddefaults_git_http_proxy | default(omit, true) }}"
+ gitHTTPSProxy: "{{ openshift.master.builddefaults_git_https_proxy | default(omit, true) }}"
+ env:
+ - name: HTTP_PROXY
+ value: "{{ openshift.master.builddefaults_http_proxy | default(omit, true) }}"
+ - name: HTTPS_PROXY
+ value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
+ - name: NO_PROXY
+ value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}" \ No newline at end of file
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
new file mode 100644
index 000000000..2e903379a
--- /dev/null
+++ b/roles/openshift_metrics/README.md
@@ -0,0 +1,57 @@
+OpenShift Metrics with Hawkular
+===============================
+
+OpenShift Metrics Installation
+
+Requirements
+------------
+It requires the cluster's default subdomain FQDN to be set.
+If persistence is enabled, it also requires NFS.
+
+Role Variables
+--------------
+
+From this role:
+
+| Name                                            | Default value     | Description                                                |
+|-------------------------------------------------|-------------------|------------------------------------------------------------|
+| openshift_hosted_metrics_deploy                 | False             | Whether metrics should be deployed                         |
+| openshift_hosted_metrics_storage_nfs_directory  | /exports          | Root export directory                                      |
+| openshift_hosted_metrics_storage_volume_name    | metrics           | Metrics volume within openshift_hosted_metrics_volume_dir  |
+| openshift_hosted_metrics_storage_volume_size    | 10Gi              | Metrics volume size                                        |
+| openshift_hosted_metrics_storage_nfs_options    | *(rw,root_squash) | NFS options for configured exports                         |
+| openshift_hosted_metrics_duration               | 7                 | Metrics query duration                                     |
+| openshift_hosted_metrics_resolution             | 10                | Metrics resolution                                         |
+
+
+From openshift_common:
+
+| Name                                | Default Value | Description                |
+|-------------------------------------|---------------|----------------------------|
+| openshift_master_default_subdomain  | null          | Subdomain FQDN (mandatory) |
+
+
+Dependencies
+------------
+openshift_facts
+openshift_examples
+
+Example Playbook
+----------------
+
+```
+- name: Configure openshift-metrics
+ hosts: oo_first_master
+ roles:
+ - role: openshift_metrics
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose David Martín (j.david.nieto@gmail.com)
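A sketch of inventory variables that could drive this role, using the names from the tables above (values are examples only):

```
openshift_hosted_metrics_deploy: true
openshift_hosted_metrics_storage_volume_size: 20Gi
openshift_hosted_metrics_duration: 7
openshift_hosted_metrics_resolution: 10
openshift_master_default_subdomain: apps.example.com
```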
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml
new file mode 100644
index 000000000..5f8d4f5c5
--- /dev/null
+++ b/roles/openshift_metrics/meta/main.yaml
@@ -0,0 +1,3 @@
+dependencies:
+- { role: openshift_examples }
+- { role: openshift_facts } \ No newline at end of file
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
new file mode 100644
index 000000000..ca29ad6e1
--- /dev/null
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -0,0 +1,57 @@
+---
+- name: Copy Configuration to temporary conf
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{hawkular_tmp_conf}}
+ changed_when: false
+
+- name: Create metrics-deployer Service Account
+ shell: >
+ echo {{ deployer_service_account | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ -n openshift-infra
+ --config={{hawkular_tmp_conf}}
+ -f -
+ register: deployer_create_service_account
+ failed_when: "'already exists' not in deployer_create_service_account.stderr and deployer_create_service_account.rc != 0"
+ changed_when: deployer_create_service_account.rc == 0
+
+- name: Create metrics-deployer Secret
+ command: >
+ {{ openshift.common.client_binary }}
+ secrets new metrics-deployer
+ nothing=/dev/null
+ --config={{hawkular_tmp_conf}}
+ -n openshift-infra
+ register: deployer_create_secret
+ failed_when: "'already exists' not in deployer_create_secret.stderr and deployer_create_secret.rc !=0"
+ changed_when: deployer_create_secret.rc == 0
+
+- name: Configure role/user permissions
+ command: >
+ {{ openshift.common.admin_binary }} {{item}}
+ --config={{hawkular_tmp_conf}}
+ with_items: "{{hawkular_permission_oc_commands}}"
+ register: hawkular_perm_task
+ failed_when: "'already exists' not in hawkular_perm_task.stderr and hawkular_perm_task.rc != 0"
+ changed_when: hawkular_perm_task.rc == 0
+
+- name: Check openshift_master_default_subdomain
+ fail:
+ msg: "Default subdomain should be defined"
+ when: openshift.master.default_subdomain is not defined
+
+- name: Create Heapster and Hawkular/Cassandra Services
+ shell: >
+ {{ openshift.common.client_binary }} process -f \
+ /usr/share/openshift/examples/infrastructure-templates/{{ hawkular_type }}/metrics-deployer.yaml -v \
+ HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.{{ openshift.master.default_subdomain }} USE_PERSISTENT_STORAGE={{ hawkular_persistence }} \
+ METRIC_DURATION={{ openshift.hosted.metrics.duration }} METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} |
+ {{ openshift.common.client_binary }} create -n openshift-infra --config={{hawkular_tmp_conf}} -f -
+ register: oex_heapster_services
+ failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0"
+ changed_when: false
+
+- name: Clean temporary config file
+  file:
+    path: "{{ hawkular_tmp_conf }}"
+    state: absent
+  changed_when: false
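The register/failed_when/changed_when triple used above is the role's idempotency pattern: oc create fails on re-runs with 'already exists', and exactly that error is tolerated. A sketch of the general shape, with a hypothetical resource file:

```
- name: Create a resource, tolerating pre-existing objects
  command: >
    {{ openshift.common.client_binary }} create
    -f /tmp/example-resource.yaml
    -n openshift-infra
  register: create_result
  failed_when: "'already exists' not in create_result.stderr and create_result.rc != 0"
  changed_when: create_result.rc == 0
```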
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
new file mode 100644
index 000000000..82d9d29f7
--- /dev/null
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -0,0 +1,19 @@
+hawkular_permission_oc_commands:
+ - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
+ - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
+
+deployer_service_account:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: metrics-deployer
+ secrets:
+ - name: metrics-deployer
+
+
+hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
+
+hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
+
+hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
+
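The inline {% if %}true{% else %}false{% endif %} guarantees lowercase strings, which the deployer template's USE_PERSISTENT_STORAGE switch presumably expects; a bare Jinja boolean would render as True/False. A more compact form with the same assumed behavior would be:

```
hawkular_persistence: "{{ (openshift.hosted.metrics.storage.kind is not none) | lower }}"
```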
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 84ba9ac2e..ca0c332ea 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -15,3 +15,6 @@ dependencies:
- role: openshift_docker
- role: openshift_cloud_provider
- role: openshift_common
+- role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq
+
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 8987e0191..06fde88af 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -9,13 +9,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- # TODO: Replace this with a lookup or filter plugin.
- # TODO: Move this to the node role
- dns_ip: "{{ openshift_dns_ip
- | default(openshift_master_cluster_vip
- | default(None if openshift.common.version_gte_3_1_or_1_1 | bool else openshift_node_first_master_ip | default(None, true), true), true) }}"
- role: node
local_facts:
annotations: "{{ openshift_node_annotations | default(none) }}"
@@ -32,6 +25,7 @@
ovs_image: "{{ osn_ovs_image | default(None) }}"
proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
+ dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname]) }}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 28cb1ea26..9ba1a01dd 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -1,8 +1,8 @@
allowDisabledDocker: false
apiVersion: v1
dnsDomain: {{ openshift.common.dns_domain }}
-{% if 'dns_ip' in openshift.common %}
-dnsIP: {{ openshift.common.dns_ip }}
+{% if 'dns_ip' in openshift.node %}
+dnsIP: {{ openshift.node.dns_ip }}
{% endif %}
dockerConfig:
execHandlerName: ""
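With the fact moved from openshift.common to openshift.node, a rendered node-config.yaml fragment might begin like this (addresses hypothetical), with dnsIP pointing at the node itself once dnsmasq is in use:

```
allowDisabledDocker: false
apiVersion: v1
dnsDomain: cluster.local
dnsIP: 192.168.1.10  # this node's own address, answered by the local dnsmasq
```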
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index c9a7a40c8..216c11093 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -1,5 +1,5 @@
---
-- name: Create openshift_generated_configs_dir if it doesn't exist
+- name: Create openshift_generated_configs_dir if it does not exist
file:
path: "{{ openshift_generated_configs_dir }}"
state: directory
@@ -19,7 +19,7 @@
--user=system:node:{{ item.openshift.common.hostname }}
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
- with_items: nodes_needing_certs
+ with_items: "{{ nodes_needing_certs | default([]) }}"
- name: Generate the node server certificate
command: >
@@ -33,4 +33,4 @@
--signer-serial={{ openshift_master_ca_serial }}
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt"
- with_items: nodes_needing_certs
+ with_items: "{{ nodes_needing_certs | default([]) }}"
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
new file mode 100755
index 000000000..51e0751e9
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -0,0 +1,55 @@
+#!/bin/bash -x
+
+# This NetworkManager dispatcher script replicates the functionality of
+# NetworkManager's dns=dnsmasq mode. However, rather than hardcoding the
+# dnsmasq listening address and the /etc/resolv.conf nameserver to 127.0.0.1,
+# it pulls the IP address from the interface that owns the default route.
+# This enables us to configure pods to use this IP address as their only
+# resolver, whereas using 127.0.0.1 inside a pod would fail.
+#
+# To use this,
+# Drop this script in /etc/NetworkManager/dispatcher.d/
+# systemctl restart NetworkManager
+# Configure node-config.yaml to set dnsIP: to the IP address of this
+# node
+#
+# Test it:
+# host kubernetes.default.svc.cluster.local
+# host google.com
+#
+# TODO: I think this would be easy to add as a config option in NetworkManager
+# natively, look at hacking that up
+
+cd /etc/sysconfig/network-scripts
+. ./network-functions
+
+[ -f ../network ] && . ../network
+
+if [[ $2 =~ ^(up|dhcp4-change)$ ]]; then
+ # couldn't find an existing method to determine if the interface owns the
+ # default route
+ def_route=$(/sbin/ip route list match 0.0.0.0/0 | awk '{print $3 }')
+ def_route_int=$(/sbin/ip route get to ${def_route} | awk '{print $3}')
+ def_route_ip=$(/sbin/ip route get to ${def_route} | awk '{print $5}')
+ if [[ ${DEVICE_IFACE} == ${def_route_int} ]]; then
+ if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then
+ cat << EOF > /etc/dnsmasq.d/origin-dns.conf
+strict-order
+no-resolv
+domain-needed
+server=/cluster.local/172.30.0.1
+server=/30.172.in-addr.arpa/172.30.0.1
+EOF
+ fi
+ # zero out our upstream servers list and feed it into dnsmasq
+ echo '' > /etc/dnsmasq.d/origin-upstream-dns.conf
+ for ns in ${DHCP4_DOMAIN_NAME_SERVERS}; do
+ echo "server=${ns}" >> /etc/dnsmasq.d/origin-upstream-dns.conf
+ done
+ echo "listen-address=${def_route_ip}" >> /etc/dnsmasq.d/origin-upstream-dns.conf
+ systemctl restart dnsmasq
+
+ sed -i 's/^nameserver.*$/nameserver '"${def_route_ip}"'/g' /etc/resolv.conf
+ echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> /etc/resolv.conf
+ fi
+fi
diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml
new file mode 100644
index 000000000..7e9e4d299
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart NetworkManager
+ service:
+ name: NetworkManager
+ state: restarted
diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml
new file mode 100644
index 000000000..c83d64ae4
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Scott Dodson
+ description: OpenShift Node DNSMasq support
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_common
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
new file mode 100644
index 000000000..4cb24469d
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+- name: Check for NetworkManager service
+  command: >
+    systemctl show NetworkManager
+  register: nm_show
+  changed_when: false
+
+- name: Set fact network_manager_active
+  set_fact:
+    network_manager_active: "{{ 'ActiveState=active' in nm_show.stdout }}"
+
+- name: Install dnsmasq
+ action: "{{ ansible_pkg_mgr }} name=dnsmasq state=installed"
+ when: not openshift.common.is_atomic | bool
+
+- name: Install dnsmasq configuration
+ template:
+ src: origin-dns.conf.j2
+ dest: /etc/dnsmasq.d/origin-dns.conf
+
+# Dynamic NetworkManager based dispatcher
+- include: ./network-manager.yml
+ when: network_manager_active | bool
+
+# Relies on Ansible to configure the static config
+- include: ./no-network-manager.yml
+ when: not network_manager_active | bool
+
diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
new file mode 100644
index 000000000..dddcfc9da
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
@@ -0,0 +1,9 @@
+---
+- name: Install network manager dispatch script
+ copy:
+ src: networkmanager/99-origin-dns.sh
+ dest: /etc/NetworkManager/dispatcher.d/
+ mode: 0755
+ notify: restart NetworkManager
+
+- meta: flush_handlers
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
new file mode 100644
index 000000000..4d1bd3794
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
@@ -0,0 +1,2 @@
+---
+- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation." \ No newline at end of file
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
new file mode 100644
index 000000000..1753bb821
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -0,0 +1,4 @@
+strict-order
+no-resolv
+domain-needed
+server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }}
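Rendered with typical defaults (assuming dns_domain=cluster.local and kube_svc_ip=172.30.0.1, matching the values hardcoded in the dispatcher script above), this template would produce:

```
strict-order
no-resolv
domain-needed
server=/cluster.local/172.30.0.1
```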
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index 5dd28d52a..bafda9695 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -1,7 +1,7 @@
- name: test if service accounts exists
command: >
{{ openshift.common.client_binary }} get sa {{ item }} -n {{ openshift_serviceaccounts_namespace }}
- with_items: openshift_serviceaccounts_names
+ with_items: "{{ openshift_serviceaccounts_names }}"
failed_when: false
changed_when: false
register: account_test
@@ -13,8 +13,8 @@
-n {{ openshift_serviceaccounts_namespace }} create -f -
when: item.1.rc != 0
with_together:
- - openshift_serviceaccounts_names
- - account_test.results
+ - "{{ openshift_serviceaccounts_names }}"
+ - "{{ account_test.results }}"
- name: test if scc needs to be updated
command: >
@@ -22,7 +22,7 @@
changed_when: false
failed_when: false
register: scc_test
- with_items: openshift_serviceaccounts_sccs
+ with_items: "{{ openshift_serviceaccounts_sccs }}"
- name: Grant the user access to the privileged scc
command: >
@@ -30,8 +30,8 @@
privileged system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}
when: "openshift.common.version_gte_3_1_or_1_1 and item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}"
with_nested:
- - openshift_serviceaccounts_names
- - scc_test.results
+ - "{{ openshift_serviceaccounts_names }}"
+ - "{{ scc_test.results }}"
- include: legacy_add_scc_to_user.yml
when: not openshift.common.version_gte_3_1_or_1_1
diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml
index 5f6893129..90592e9d0 100644
--- a/roles/openshift_storage_nfs/defaults/main.yml
+++ b/roles/openshift_storage_nfs/defaults/main.yml
@@ -8,6 +8,14 @@ openshift:
options: "*(rw,root_squash)"
volume:
name: "registry"
+ metrics:
+ deploy: False
+ storage:
+ nfs:
+ directory: "/exports"
+ options: "*(rw,root_squash)"
+ volume:
+ name: "metrics"
os_firewall_use_firewalld: False
os_firewall_allow:
- service: nfs
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index 1ee02e18a..3680ef5b5 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -62,7 +62,7 @@ Both of them are mounted into `/exports/openshift` directory. Both directories
exported via NFS. json files are created in /root.
- hosts: nfsservers
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
@@ -87,7 +87,7 @@ exported via NFS. json files are created in /root.
* Create an ansible playbook, say `setupnfs.yaml`:
```
- hosts: nfsservers
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index e3176e611..20413d563 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -1,3 +1,5 @@
---
os_firewall_enabled: True
os_firewall_use_firewalld: True
+os_firewall_allow: []
+os_firewall_deny: []
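Defaulting os_firewall_allow and os_firewall_deny to empty lists is what allows the `when: ... is defined` guards below to be dropped: an empty list simply iterates zero times. A sketch of a hypothetical playbook overriding the default:

```
- hosts: nodes
  roles:
  - role: os_firewall
    os_firewall_allow:
    - service: http
      port: 80/tcp
    - service: https
      port: 443/tcp
```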
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index ac4600f83..241fa8823 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -52,29 +52,25 @@
port: "{{ item.port }}"
permanent: false
state: enabled
- with_items: os_firewall_allow
- when: os_firewall_allow is defined
+ with_items: "{{ os_firewall_allow }}"
- name: Persist firewalld allow rules
firewalld:
port: "{{ item.port }}"
permanent: true
state: enabled
- with_items: os_firewall_allow
- when: os_firewall_allow is defined
+ with_items: "{{ os_firewall_allow }}"
- name: Remove firewalld allow rules
firewalld:
port: "{{ item.port }}"
permanent: false
state: disabled
- with_items: os_firewall_deny
- when: os_firewall_deny is defined
+ with_items: "{{ os_firewall_deny }}"
- name: Persist removal of firewalld allow rules
firewalld:
port: "{{ item.port }}"
permanent: true
state: disabled
- with_items: os_firewall_deny
- when: os_firewall_deny is defined
+ with_items: "{{ os_firewall_deny }}"
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 3b584f8eb..070fe6a3a 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -49,8 +49,7 @@
action: add
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
- with_items: os_firewall_allow
- when: os_firewall_allow is defined
+ with_items: "{{ os_firewall_allow }}"
- name: Remove iptables rules
os_firewall_manage_iptables:
@@ -58,5 +57,4 @@
action: remove
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
- with_items: os_firewall_deny
- when: os_firewall_deny is defined
+ with_items: "{{ os_firewall_deny }}"
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index aaf3b7972..eacf340aa 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -315,7 +315,7 @@ Note: Containerized storage hosts are not currently supported.
hostname_or_ip = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_hostname,
- default=first_master)
+ default=first_master.connect_to)
existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
if existing and existing_host.node:
existing_host.storage = True
@@ -520,6 +520,34 @@ def error_if_missing_info(oo_cfg):
if missing_info:
sys.exit(1)
+def get_proxy_hostnames_and_excludes():
+ message = """
+If a proxy is needed to reach HTTP and HTTPS traffic, please enter its hostname below.
+This proxy will be configured by default for all processes that need to reach systems
+outside the cluster.
+
+More advanced configuration is possible if using Ansible directly:
+
+https://docs.openshift.com/enterprise/latest/install_config/http_proxies.html
+"""
+ click.echo(message)
+
+ message = "Specify your http proxy ? (ENTER for none)"
+ http_proxy_hostname = click.prompt(message, default='')
+
+ message = "Specify your https proxy ? (ENTER for none)"
+ https_proxy_hostname = click.prompt(message, default=http_proxy_hostname)
+
+ if http_proxy_hostname or https_proxy_hostname:
+ message = """
+All hosts in your OpenShift inventory will automatically be added to the NO_PROXY value.
+Please provide any additional hosts to be added to NO_PROXY. (ENTER for none)
+"""
+ proxy_excludes = click.prompt(message, default='')
+ else:
+ proxy_excludes = ''
+
+ return http_proxy_hostname, https_proxy_hostname, proxy_excludes
def get_missing_info_from_user(oo_cfg):
""" Prompts the user for any information missing from the given configuration. """
@@ -566,6 +594,13 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
oo_cfg.settings['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain()
click.clear()
+ if not oo_cfg.settings.get('openshift_http_proxy', None):
+ http_proxy, https_proxy, proxy_excludes = get_proxy_hostnames_and_excludes()
+ oo_cfg.settings['openshift_http_proxy'] = http_proxy
+ oo_cfg.settings['openshift_https_proxy'] = https_proxy
+ oo_cfg.settings['openshift_no_proxy'] = proxy_excludes
+ click.clear()
+
return oo_cfg
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 0f1f5caf7..24dfbe013 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -11,6 +11,8 @@ PERSIST_SETTINGS = [
'ansible_config',
'ansible_log_path',
'master_routingconfig_subdomain',
+ 'proxy',
+ 'proxy_exclude_hosts',
'variant',
'variant_version',
'version',
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index cf2ca51ca..5ace63918 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -121,7 +121,7 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy):
base_inventory.write('\n[OSEv3:vars]\n')
base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
if CFG.settings['ansible_ssh_user'] != 'root':
- base_inventory.write('ansible_become=true\n')
+ base_inventory.write('ansible_become=yes\n')
if multiple_masters and proxy is not None:
base_inventory.write('openshift_master_cluster_method=native\n')
base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
@@ -129,7 +129,8 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy):
"openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
if CFG.settings.get('master_routingconfig_subdomain', False):
base_inventory.write(
- "openshift_master_default_subdomain={}\n".format(CFG.settings['master_routingconfig_subdomain']))
+ "openshift_master_default_subdomain={}\n".format(
+ CFG.settings['master_routingconfig_subdomain']))
if CFG.settings.get('variant_version', None) == '3.1':
#base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6'))
@@ -169,7 +170,7 @@ def write_host(host, inventory, schedulable=None):
if no_pwd_sudo == 1:
print 'The atomic-openshift-installer requires sudo access without a password.'
sys.exit(1)
- facts += ' ansible_become=true'
+ facts += ' ansible_become=yes'
inventory.write('{} {}\n'.format(host.connect_to, facts))
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 1657d8f46..e01eaebaf 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -201,6 +201,9 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
inputs.append(storage)
inputs.append('subdomain.example.com')
+ inputs.append('proxy.example.com')
+ inputs.append('proxy-private.example.com')
+ inputs.append('exclude.example.com')
# TODO: support option 2, fresh install
if add_nodes: