Diffstat:
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  bin/README_SHELL_COMPLETION | 2
-rw-r--r--  bin/openshift_ansible.conf.example | 2
-rw-r--r--  bin/openshift_ansible/awsutil.py | 11
-rwxr-xr-x  bin/ossh_bash_completion | 20
-rw-r--r--  bin/ossh_zsh_completion | 10
-rw-r--r--  bin/zsh_functions/_ossh | 4
-rw-r--r--  filter_plugins/oo_filters.py | 67
-rw-r--r--  inventory/byo/hosts.example | 5
-rw-r--r--  inventory/multi_ec2.yaml.example | 32
-rwxr-xr-x  inventory/multi_inventory.py (renamed from inventory/multi_ec2.py) | 144
-rw-r--r--  inventory/multi_inventory.yaml.example | 51
-rw-r--r--  openshift-ansible.spec | 51
-rw-r--r--  playbooks/common/openshift-master/config.yml | 9
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 16
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 14
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 16
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml | 8
-rw-r--r--  test/units/README.md | 2
-rwxr-xr-x  test/units/multi_inventory_test.py | 114
-rwxr-xr-x  test/units/mutli_ec2_test.py | 95
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 2
22 files changed, 455 insertions, 222 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 92f545b25..6046a1a86 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.6-1 ./
+3.0.7-1 ./
diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION
index 5f05df7fc..49bba3acc 100644
--- a/bin/README_SHELL_COMPLETION
+++ b/bin/README_SHELL_COMPLETION
@@ -14,7 +14,7 @@ will populate the cache file and the completions should
become available.
This script will look at the cached version of your
-multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache.
+multi_inventory results in ~/.ansible/tmp/multi_inventory.cache.
It will then parse a few {host}.{env} out of the json
and return them to be completable.
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
index e891b855a..8786dfc13 100644
--- a/bin/openshift_ansible.conf.example
+++ b/bin/openshift_ansible.conf.example
@@ -1,5 +1,5 @@
#[main]
-#inventory = /usr/share/ansible/inventory/multi_ec2.py
+#inventory = /usr/share/ansible/inventory/multi_inventory.py
#[host_type_aliases]
#host-type-one = aliasa,aliasb
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 9df034f57..45345007c 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -4,7 +4,10 @@
import os
import re
-from openshift_ansible import multi_ec2
+
+# Buildbot does not have multi_inventory installed
+#pylint: disable=no-name-in-module
+from openshift_ansible import multi_inventory
class ArgumentError(Exception):
"""This class is raised when improper arguments are passed."""
@@ -49,9 +52,9 @@ class AwsUtil(object):
Keyword arguments:
args -- optional arguments to pass to the inventory script
"""
- mec2 = multi_ec2.MultiEc2(args)
- mec2.run()
- return mec2.result
+ minv = multi_inventory.MultiInventory(args)
+ minv.run()
+ return minv.result
def get_environments(self):
"""Searches for env tags in the inventory and returns all of the envs found."""
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 5072161f0..997ff0f9c 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,12 +1,12 @@
__ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
fi
}
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
__opssh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
fi
}
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index 44500c618..3c4018636 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -2,13 +2,13 @@
_ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
fi
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
index 7c6cb7b0b..d205e1055 100644
--- a/bin/zsh_functions/_ossh
+++ b/bin/zsh_functions/_ossh
@@ -1,8 +1,8 @@
#compdef ossh oscp
_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
+ if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
fi
}
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index a57b0f895..dfd9a111e 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -7,6 +7,8 @@ Custom filters for use in openshift-ansible
from ansible import errors
from operator import itemgetter
+import OpenSSL.crypto
+import os.path
import pdb
import re
import json
@@ -327,6 +329,68 @@ class FilterModule(object):
return revamped_outputs
+ @staticmethod
+ # pylint: disable=too-many-branches
+ def oo_parse_certificate_names(certificates, data_dir, internal_hostnames):
+ ''' Parses names from list of certificate hashes.
+
+ Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt",
+ "keyfile": "/etc/origin/master/custom1.key" },
+ { "certfile": "custom2.crt",
+ "keyfile": "custom2.key" }]
+
+ returns [{ "certfile": "/etc/origin/master/custom1.crt",
+ "keyfile": "/etc/origin/master/custom1.key",
+ "names": [ "public-master-host.com",
+ "other-master-host.com" ] },
+ { "certfile": "/etc/origin/master/custom2.crt",
+ "keyfile": "/etc/origin/master/custom2.key",
+ "names": [ "some-hostname.com" ] }]
+ '''
+ if not issubclass(type(certificates), list):
+ raise errors.AnsibleFilterError("|failed expects certificates is a list")
+
+ if not issubclass(type(data_dir), unicode):
+ raise errors.AnsibleFilterError("|failed expects data_dir is unicode")
+
+ if not issubclass(type(internal_hostnames), list):
+ raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+ for certificate in certificates:
+ if 'names' in certificate.keys():
+ continue
+ else:
+ certificate['names'] = []
+
+ if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+ # Unable to find cert/key, try to prepend data_dir to paths
+ certificate['certfile'] = os.path.join(data_dir, certificate['certfile'])
+ certificate['keyfile'] = os.path.join(data_dir, certificate['keyfile'])
+ if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+ # Unable to find cert/key in data_dir
+ raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+ (certificate['certfile'], certificate['keyfile']))
+
+ try:
+ st_cert = open(certificate['certfile'], 'rt').read()
+ cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+ certificate['names'].append(str(cert.get_subject().commonName.decode()))
+ for i in range(cert.get_extension_count()):
+ if cert.get_extension(i).get_short_name() == 'subjectAltName':
+ for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+ certificate['names'].append(name)
+ except:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+ "please specify certificate names in host inventory"))
+
+ certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+ certificate['names'] = list(set(certificate['names']))
+ if not certificate['names']:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+ "detected a collision with internal hostname, please specify " +
+ "certificate names in host inventory"))
+ return certificates
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -342,5 +406,6 @@ class FilterModule(object):
"oo_combine_dict": self.oo_combine_dict,
"oo_split": self.oo_split,
"oo_filter_list": self.oo_filter_list,
- "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
+ "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
+ "oo_parse_certificate_names": self.oo_parse_certificate_names
}
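For reference, here is a minimal standalone sketch of the name extraction that the new oo_parse_certificate_names filter performs. It is illustration only, not part of the patch: it assumes pyOpenSSL is installed, and the certificate path in the usage comment is hypothetical.

# Illustration of the filter's name extraction; the filter above is authoritative.
import OpenSSL.crypto

def certificate_names(cert_path):
    '''Return the subject CN plus any subjectAltName DNS entries of a PEM cert.'''
    with open(cert_path) as cert_file:
        cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                               cert_file.read())
    names = [str(cert.get_subject().commonName)]
    for i in range(cert.get_extension_count()):
        extension = cert.get_extension(i)
        # get_short_name() returns bytes on Python 3 and str on Python 2
        if extension.get_short_name() == b'subjectAltName':
            names.extend(str(extension).replace('DNS:', '').split(', '))
    return sorted(set(names))

# Hypothetical usage:
# print(certificate_names('/etc/origin/master/custom1.crt'))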
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index ad19fe116..f60918e6d 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -99,6 +99,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# set RPM version for debugging purposes
#openshift_pkg_version=-3.0.0.0
+# Configure custom master certificates
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
deleted file mode 100644
index bbd81ad20..000000000
--- a/inventory/multi_ec2.yaml.example
+++ /dev/null
@@ -1,32 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
-
-accounts:
- - name: aws1
- provider: aws/hosts/ec2.py
- provider_config:
- ec2:
- regions: all
- regions_exclude: us-gov-west-1,cn-north-1
- destination_variable: public_dns_name
- route53: False
- cache_path: ~/.ansible/tmp
- cache_max_age: 300
- vpc_destination_variable: ip_address
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- all_group: ec2
- extra_vars:
- cloud: aws
- account: aws1
-
-- name: aws2
- provider: aws/hosts/ec2.py
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- EC2_INI_PATH: /etc/ansible/ec2.ini
-
-cache_max_age: 60
diff --git a/inventory/multi_ec2.py b/inventory/multi_inventory.py
index 98dde3f3c..354a8c10c 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_inventory.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python2
'''
- Fetch and combine multiple ec2 account settings into a single
+ Fetch and combine multiple inventory account settings into a single
json hash.
'''
# vim: expandtab:tabstop=4:shiftwidth=4
@@ -15,13 +15,19 @@ import errno
import fcntl
import tempfile
import copy
+from string import Template
+import shutil
-CONFIG_FILE_NAME = 'multi_ec2.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
+CONFIG_FILE_NAME = 'multi_inventory.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
-class MultiEc2(object):
+class MultiInventoryException(Exception):
+ '''Exceptions for MultiInventory class'''
+ pass
+
+class MultiInventory(object):
'''
- MultiEc2 class:
+ MultiInventory class:
Opens a yaml config file and reads aws credentials.
Stores a json hash of resources in result.
'''
@@ -35,7 +41,7 @@ class MultiEc2(object):
self.cache_path = DEFAULT_CACHE_PATH
self.config = None
- self.all_ec2_results = {}
+ self.all_inventory_results = {}
self.result = {}
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
@@ -56,7 +62,7 @@ class MultiEc2(object):
cache is valid for the inventory.
if the cache is valid; return cache
- else the credentials are loaded from multi_ec2.yaml or from the env
+ else the credentials are loaded from multi_inventory.yaml or from the env
and we attempt to get the inventory from the provider specified.
'''
# load yaml
@@ -111,6 +117,10 @@ class MultiEc2(object):
with open(conf_file) as conf:
config = yaml.safe_load(conf)
+ # Provide a check for unique account names
+ if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
+ raise MultiInventoryException('Duplicate account names in config file')
+
return config
def get_provider_tags(self, provider, env=None):
@@ -136,23 +146,25 @@ class MultiEc2(object):
else:
cmds.append('--list')
- cmds.append('--refresh-cache')
+ if 'aws' in provider.lower():
+ cmds.append('--refresh-cache')
return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
stdout=subprocess.PIPE, env=env)
@staticmethod
- def generate_config(config_data):
- """Generate the ec2.ini file in as a secure temp file.
- Once generated, pass it to the ec2.py as an environment variable.
+ def generate_config(provider_files):
+ """Generate the provider_files in a temporary directory.
"""
- fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
- for section, values in config_data.items():
- os.write(fildes, "[%s]\n" % section)
- for option, value in values.items():
- os.write(fildes, "%s = %s\n" % (option, value))
- os.close(fildes)
- return tmp_file_path
+ prefix = 'multi_inventory.'
+ tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
+ for provider_file in provider_files:
+ filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
+ content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
+ filedes.write(content)
+ filedes.close()
+
+ return tmp_dir_path
def run_provider(self):
'''Setup the provider call with proper variables
@@ -160,13 +172,21 @@ class MultiEc2(object):
'''
try:
all_results = []
- tmp_file_paths = []
+ tmp_dir_paths = []
processes = {}
for account in self.config['accounts']:
- env = account['env_vars']
- if account.has_key('provider_config'):
- tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
- env['EC2_INI_PATH'] = tmp_file_paths[-1]
+ tmp_dir = None
+ if account.has_key('provider_files'):
+ tmp_dir = MultiInventory.generate_config(account['provider_files'])
+ tmp_dir_paths.append(tmp_dir)
+
+ # Update env vars after creating provider_config_files
+ # so that we can grab the tmp_dir if it exists
+ env = account.get('env_vars', {})
+ if env and tmp_dir:
+ for key, value in env.items():
+ env[key] = Template(value).substitute(tmpdir=tmp_dir)
+
name = account['name']
provider = account['provider']
processes[name] = self.get_provider_tags(provider, env)
@@ -182,9 +202,9 @@ class MultiEc2(object):
})
finally:
- # Clean up the mkstemp file
- for tmp_file in tmp_file_paths:
- os.unlink(tmp_file)
+ # Clean up the mkdtemp dirs
+ for tmp_dir in tmp_dir_paths:
+ shutil.rmtree(tmp_dir)
return all_results
@@ -223,7 +243,7 @@ class MultiEc2(object):
]
raise RuntimeError('\n'.join(err_msg).format(**result))
else:
- self.all_ec2_results[result['name']] = json.loads(result['out'])
+ self.all_inventory_results[result['name']] = json.loads(result['out'])
# Check if user wants extra vars in yaml by
# having hostvars and all_group defined
@@ -231,29 +251,52 @@ class MultiEc2(object):
self.apply_account_config(acc_config)
# Build results by merging all dictionaries
- values = self.all_ec2_results.values()
+ values = self.all_inventory_results.values()
values.insert(0, self.result)
for result in values:
- MultiEc2.merge_destructively(self.result, result)
+ MultiInventory.merge_destructively(self.result, result)
+
+ def add_entry(self, data, keys, item):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ keys = a.b
+ item = c
+ '''
+ if "." in keys:
+ key, rest = keys.split(".", 1)
+ if key not in data:
+ data[key] = {}
+ self.add_entry(data[key], rest, item)
+ else:
+ data[keys] = item
+
+ def get_entry(self, data, keys):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ keys = a.b
+ return c
+ '''
+ if keys and "." in keys:
+ key, rest = keys.split(".", 1)
+ return self.get_entry(data[key], rest)
+ else:
+ return data.get(keys, None)
def apply_account_config(self, acc_config):
''' Apply account config settings
'''
- results = self.all_ec2_results[acc_config['name']]
+ results = self.all_inventory_results[acc_config['name']]
+ results['all_hosts'] = results['_meta']['hostvars'].keys()
# Update each hostvar with the newly desired key: value from extra_*
- for _extra in ['extra_groups', 'extra_vars']:
+ for _extra in ['extra_vars', 'extra_groups']:
for new_var, value in acc_config.get(_extra, {}).items():
- # Verify the account results look sane
- # by checking for these keys ('_meta' and 'hostvars' exist)
- if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
- for data in results['_meta']['hostvars'].values():
- data[str(new_var)] = str(value)
+ for data in results['_meta']['hostvars'].values():
+ self.add_entry(data, new_var, value)
# Add this group
- if _extra == 'extra_groups' and results.has_key(acc_config['all_group']):
- results["%s_%s" % (new_var, value)] = \
- copy.copy(results[acc_config['all_group']])
+ if _extra == 'extra_groups':
+ results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
# Clone groups goes here
for to_name, from_name in acc_config.get('clone_groups', {}).items():
@@ -262,14 +305,11 @@ class MultiEc2(object):
# Clone vars goes here
for to_name, from_name in acc_config.get('clone_vars', {}).items():
- # Verify the account results look sane
- # by checking for these keys ('_meta' and 'hostvars' exist)
- if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
- for data in results['_meta']['hostvars'].values():
- data[str(to_name)] = data.get(str(from_name), 'nil')
+ for data in results['_meta']['hostvars'].values():
+ self.add_entry(data, to_name, self.get_entry(data, from_name))
- # store the results back into all_ec2_results
- self.all_ec2_results[acc_config['name']] = results
+ # store the results back into all_inventory_results
+ self.all_inventory_results[acc_config['name']] = results
@staticmethod
def merge_destructively(input_a, input_b):
@@ -277,7 +317,7 @@ class MultiEc2(object):
for key in input_b:
if key in input_a:
if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
- MultiEc2.merge_destructively(input_a[key], input_b[key])
+ MultiInventory.merge_destructively(input_a[key], input_b[key])
elif input_a[key] == input_b[key]:
pass # same leaf value
# both lists so add each element in b to a if it does ! exist
@@ -333,7 +373,7 @@ class MultiEc2(object):
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
- json_data = MultiEc2.json_format_dict(self.result, True)
+ json_data = MultiInventory.json_format_dict(self.result, True)
with open(self.cache_path, 'w') as cache:
try:
fcntl.flock(cache, fcntl.LOCK_EX)
@@ -369,7 +409,7 @@ class MultiEc2(object):
if __name__ == "__main__":
- MEC2 = MultiEc2()
- MEC2.parse_cli_args()
- MEC2.run()
- print MEC2.result_str()
+ MI2 = MultiInventory()
+ MI2.parse_cli_args()
+ MI2.run()
+ print MI2.result_str()
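The new add_entry/get_entry helpers above let extra_vars and clone_vars address nested hostvars with dotted keys. Below is a minimal self-contained sketch of that behavior, re-implemented so it runs without the inventory module on the path; the keys and values are illustrative only.

# Standalone illustration of the dotted-key helpers added in this patch.
def add_entry(data, keys, item):
    '''Set data['a']['b'] = item when keys == "a.b".'''
    if '.' in keys:
        key, rest = keys.split('.', 1)
        data.setdefault(key, {})
        add_entry(data[key], rest, item)
    else:
        data[keys] = item

def get_entry(data, keys):
    '''Return data['a']['b'] when keys == "a.b", or None when absent.'''
    if keys and '.' in keys:
        key, rest = keys.split('.', 1)
        return get_entry(data.get(key, {}), rest)
    return data.get(keys)

hostvar = {}
add_entry(hostvar, 'oo_clusterid', 'prod')        # flat key
add_entry(hostvar, 'cloud.aws.account', 'aws1')   # nested key
assert get_entry(hostvar, 'cloud.aws.account') == 'aws1'
assert hostvar == {'oo_clusterid': 'prod', 'cloud': {'aws': {'account': 'aws1'}}}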
diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example
new file mode 100644
index 000000000..0f0788d18
--- /dev/null
+++ b/inventory/multi_inventory.yaml.example
@@ -0,0 +1,51 @@
+# multi ec2 inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_inventory.cache
+
+accounts:
+ - name: aws1
+ provider: aws/ec2.py
+ provider_files:
+ - name: ec2.ini
+ content: |-
+ [ec2]
+ regions = all
+ regions_exclude = us-gov-west-1,cn-north-1
+ destination_variable = public_dns_name
+ route53 = False
+ cache_path = ~/.ansible/tmp
+ cache_max_age = 300
+ vpc_destination_variable = ip_address
+ env_vars:
+ AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+ AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ extra_vars:
+ cloud: aws
+ account: aws1
+
+- name: mygce
+ extra_vars:
+ cloud: gce
+ account: gce1
+ env_vars:
+ GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ provider: gce/gce.py
+ provider_files:
+ - name: priv_key.pem
+ contents: |-
+ -----BEGIN PRIVATE KEY-----
+ yourprivatekeydatahere
+ -----END PRIVATE KEY-----
+ - name: gce.ini
+ contents: |-
+ [gce]
+ gce_service_account_email_address = <uuid>@developer.gserviceaccount.com
+ gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ gce_project_id = gce-project
+ zone = us-central1-a
+ network = default
+ gce_machine_type = n1-standard-2
+ gce_machine_image = rhel7
+
+cache_max_age: 600
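The ${tmpdir} placeholders in the example above are expanded with Python's string.Template: generate_config writes each provider file into a temporary directory, and run_provider substitutes that same directory into env_vars. A short sketch of the substitution, with illustrative file contents:

# Illustration of the ${tmpdir} substitution performed for each account.
import os
import tempfile
from string import Template

tmp_dir = tempfile.mkdtemp(prefix='multi_inventory.')

# A provider file as it would appear in the YAML config (contents are illustrative).
provider_file = {'name': 'ec2.ini',
                 'contents': '[ec2]\ncache_path = ${tmpdir}\n'}
with open(os.path.join(tmp_dir, provider_file['name']), 'w') as filedes:
    filedes.write(Template(provider_file['contents']).substitute(tmpdir=tmp_dir))

# env_vars entries get the same treatment so they can point into the temp dir.
env = {'EC2_INI_PATH': '${tmpdir}/ec2.ini'}
env = dict((key, Template(value).substitute(tmpdir=tmp_dir))
           for key, value in env.items())
print(env['EC2_INI_PATH'])  # e.g. /tmp/multi_inventory.XXXXXX/ec2.ini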
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index d034e6d84..f819624b3 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.6
+Version: 3.0.7
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -47,9 +47,9 @@ cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
# Fix links
-rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
-ln -sf %{_datadir}/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
# openshift-ansible-docs install
@@ -60,8 +60,8 @@ mkdir -p %{buildroot}/etc/ansible
mkdir -p %{buildroot}%{_datadir}/ansible/inventory
mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
-cp -p inventory/multi_ec2.py %{buildroot}%{_datadir}/ansible/inventory
-cp -p inventory/multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory
+cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml
cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
@@ -137,7 +137,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks.
%files inventory
%config(noreplace) /etc/ansible/*
%dir %{_datadir}/ansible/inventory
-%{_datadir}/ansible/inventory/multi_ec2.py*
+%{_datadir}/ansible/inventory/multi_inventory.py*
%package inventory-aws
Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS
@@ -249,6 +249,45 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Nov 04 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.7-1
+- added the %%util in zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct default playbook directory
+ (smunilla@redhat.com)
+- Support for gce (kwoodson@redhat.com)
+- fixed a dumb naming mistake (mwoodson@redhat.com)
+- added disk tps checks to zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct inaccurate prompt (smunilla@redhat.com)
+- atomic-openshift-installer: Add default openshift-ansible-playbook
+ (smunilla@redhat.com)
+- ooinstall: Add check for nopwd sudo (smunilla@redhat.com)
+- ooinstall: Update local install check (smunilla@redhat.com)
+- oo-install: Support running on the host to be deployed (smunilla@redhat.com)
+- Moving to Openshift Etcd application (mmahut@redhat.com)
+- Add all the possible servicenames to openshift_all_hostnames for masters
+ (sdodson@redhat.com)
+- Adding openshift.node.etcd items (mmahut@redhat.com)
+- Fix etcd cert generation when etcd_interface is defined (jdetiber@redhat.com)
+- get zabbix ready to start tracking status of pcp (jdiaz@redhat.com)
+- split inventory into subpackages (tdawson@redhat.com)
+- changed the cpu alert to only alert if cpu idle more than 5x. Change alert to
+ warning (mwoodson@redhat.com)
+- Rename install_transactions module to openshift_ansible.
+ (dgoodwin@redhat.com)
+- atomic-openshift-installer: Text improvements (smunilla@redhat.com)
+- Add utils subpackage missing dep on openshift-ansible-roles.
+ (dgoodwin@redhat.com)
+- Disable requiretty for only the openshift user (error@ioerror.us)
+- Don't require tty to run sudo (error@ioerror.us)
+- Attempt to remove the various interfaces left over from an install
+ (bleanhar@redhat.com)
+- Pulling latest gce.py module from ansible (kwoodson@redhat.com)
+- Disable OpenShift features if installing Atomic Enterprise
+ (jdetiber@redhat.com)
+- Use default playbooks if available. (dgoodwin@redhat.com)
+- Add uninstall subcommand. (dgoodwin@redhat.com)
+- Add subcommands to CLI. (dgoodwin@redhat.com)
+- Remove images options in oadm command (nakayamakenjiro@gmail.com)
+
* Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1
- Adding python-boto and python-libcloud to openshift-ansible-inventory
dependency (kwoodson@redhat.com)
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1dec923fc..59c4b2370 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -199,9 +199,18 @@
validate_checksum: yes
with_items: masters_needing_certs
+- name: Inspect named certificates
+ hosts: oo_first_master
+ tasks:
+ - name: Collect certificate names
+ set_fact:
+ parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"
+ when: openshift_master_named_certificates is defined
+
- name: Configure master instances
hosts: oo_masters_to_config
vars:
+ named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
embedded_etcd: "{{ openshift.master.embedded_etcd }}"
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index 9cc15c0a8..f6919dada 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -1,12 +1,16 @@
---
- yum:
- name: openshift-ansible-inventory
+ name: "{{ item }}"
state: present
+ with_items:
+ - openshift-ansible-inventory
+ - openshift-ansible-inventory-aws
+ - openshift-ansible-inventory-gce
- name:
copy:
content: "{{ oo_inventory_accounts | to_nice_yaml }}"
- dest: /etc/ansible/multi_ec2.yaml
+ dest: /etc/ansible/multi_inventory.yaml
group: "{{ oo_inventory_group }}"
owner: "{{ oo_inventory_owner }}"
mode: "0640"
@@ -20,17 +24,17 @@
- file:
state: link
- src: /usr/share/ansible/inventory/multi_ec2.py
- dest: /etc/ansible/inventory/multi_ec2.py
+ src: /usr/share/ansible/inventory/multi_inventory.py
+ dest: /etc/ansible/inventory/multi_inventory.py
owner: root
group: libra_ops
# This cron uses the above location to call its job
- name: Cron to keep cache fresh
cron:
- name: 'multi_ec2_inventory'
+ name: 'multi_inventory'
minute: '*/10'
- job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+ job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
- name: Set cache location
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 163e67f62..28866bd48 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -484,12 +484,16 @@ def set_aggregate_facts(facts):
dict: the facts dict updated with aggregated facts
"""
all_hostnames = set()
+ internal_hostnames = set()
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
all_hostnames.add(facts['common']['ip'])
all_hostnames.add(facts['common']['public_ip'])
+ internal_hostnames.add(facts['common']['hostname'])
+ internal_hostnames.add(facts['common']['ip'])
+
if 'master' in facts:
# FIXME: not sure why but facts['dns']['domain'] fails
cluster_domain = 'cluster.local'
@@ -497,13 +501,17 @@ def set_aggregate_facts(facts):
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_public_hostname'])
- all_hostnames.update(['openshift', 'openshift.default', 'openshift.default.svc',
- 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
- 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain])
+ svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
+ 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
+ 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
+ all_hostnames.update(svc_names)
+ internal_hostnames.update(svc_names)
first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
all_hostnames.add(first_svc_ip)
+ internal_hostnames.add(first_svc_ip)
facts['common']['all_hostnames'] = list(all_hostnames)
+ facts['common']['internal_hostnames'] = list(all_hostnames)
return facts
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 73a0bc6cc..9547a6945 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -16,12 +16,15 @@ assetConfig:
maxRequestsInFlight: 0
requestTimeoutSeconds: 0
corsAllowedOrigins:
-{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] | unique %}
- {{ origin }}
{% endfor %}
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- {{ custom_origin }}
{% endfor %}
+{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %}
+ - {{ name }}
+{% endfor %}
{% if 'disabled_features' in openshift.master %}
disabledFeatures: {{ openshift.master.disabled_features | to_json }}
{% endif %}
@@ -133,3 +136,14 @@ servingInfo:
keyFile: master.server.key
maxRequestsInFlight: 500
requestTimeoutSeconds: 3600
+{% if named_certificates %}
+ namedCertificates:
+{% for named_certificate in named_certificates %}
+ - certFile: {{ named_certificate['certfile'] }}
+ keyFile: {{ named_certificate['keyfile'] }}
+ names:
+{% for name in named_certificate['names'] %}
+ - "{{ name }}"
+{% endfor %}
+{% endfor %}
+{% endif %}
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index d494f1bad..fbc20cd63 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -224,6 +224,14 @@ g_template_os_linux:
applications:
- Disk
+ - discoveryrule_key: disc.disk
+ name: "Percent Utilized for disk {#OSO_DISK}"
+ key: "disc.disk.putil[{#OSO_DISK}]"
+ value_type: float
+ description: "PCP disk.dev.avactive metric measured over a period of time. This is the '%util' in the iostat command"
+ applications:
+ - Disk
+
ztriggerprototypes:
- name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
diff --git a/test/units/README.md b/test/units/README.md
index 3bed227eb..78a02c3ea 100644
--- a/test/units/README.md
+++ b/test/units/README.md
@@ -4,4 +4,4 @@ These should be run by sourcing the env-setup:
$ source test/env-setup
Then navigate to the test/units/ directory.
-$ python -m unittest multi_ec2_test
+$ python -m unittest multi_inventory_test
diff --git a/test/units/multi_inventory_test.py b/test/units/multi_inventory_test.py
new file mode 100755
index 000000000..168cd82b7
--- /dev/null
+++ b/test/units/multi_inventory_test.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for MultiInventory
+'''
+
+import unittest
+import multi_inventory
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name
+class MultiInventoryTest(unittest.TestCase):
+ '''
+ Test class for multiInventory
+ '''
+
+# def setUp(self):
+# '''setup method'''
+# pass
+
+ def test_merge_simple_1(self):
+ '''Testing a simple merge of 2 dictionaries'''
+ a = {"key1" : 1}
+ b = {"key1" : 2}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2]})
+
+ def test_merge_b_empty(self):
+ '''Testing a merge of an empty dictionary'''
+ a = {"key1" : 1}
+ b = {}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": 1})
+
+ def test_merge_a_empty(self):
+ '''Testing a merge of an empty dictionary'''
+ b = {"key1" : 1}
+ a = {}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": 1})
+
+ def test_merge_hash_array(self):
+ '''Testing a merge of a dictionary and a dictionary with an array'''
+ a = {"key1" : {"hasha": 1}}
+ b = {"key1" : [1, 2]}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]})
+
+ def test_merge_array_hash(self):
+ '''Testing a merge of a dictionary with an array and a dictionary with a hash'''
+ a = {"key1" : [1, 2]}
+ b = {"key1" : {"hasha": 1}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]})
+
+ def test_merge_keys_1(self):
+ '''Testing a merge on a dictionary for keys'''
+ a = {"key1" : [1, 2], "key2" : {"hasha": 2}}
+ b = {"key2" : {"hashb": 1}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}})
+
+ def test_merge_recursive_1(self):
+ '''Testing a recursive merge'''
+ a = {"a" : {"b": {"c": 1}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+ def test_merge_recursive_array_item(self):
+ '''Testing a recursive merge for an array'''
+ a = {"a" : {"b": {"c": [1]}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+ def test_merge_recursive_hash_item(self):
+ '''Testing a recursive merge for a hash'''
+ a = {"a" : {"b": {"c": {"d": 1}}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
+
+ def test_merge_recursive_array_hash(self):
+ '''Testing a recursive merge for an array and a hash'''
+ a = {"a" : [{"b": {"c": 1}}]}
+ b = {"a" : {"b": {"c": 1}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+ def test_merge_recursive_hash_array(self):
+ '''Testing a recursive merge for an array and a hash'''
+ a = {"a" : {"b": {"c": 1}}}
+ b = {"a" : [{"b": {"c": 1}}]}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+# def tearDown(self):
+# '''TearDown method'''
+# pass
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/test/units/mutli_ec2_test.py b/test/units/mutli_ec2_test.py
deleted file mode 100755
index 95df93cd2..000000000
--- a/test/units/mutli_ec2_test.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python2
-
-import unittest
-import sys
-import os
-import sys
-import multi_ec2
-
-class MultiEc2Test(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_merge_simple_1(self):
- a = {"key1" : 1}
- b = {"key1" : 2}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2]})
-
- def test_merge_b_empty(self):
- a = {"key1" : 1}
- b = {}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_a_empty(self):
- b = {"key1" : 1}
- a = {}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_hash_array(self):
- a = {"key1" : {"hasha": 1}}
- b = {"key1" : [1,2]}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [{"hasha": 1}, 1,2]})
-
- def test_merge_array_hash(self):
- a = {"key1" : [1,2]}
- b = {"key1" : {"hasha": 1}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2, {"hasha": 1}]})
-
- def test_merge_keys_1(self):
- a = {"key1" : [1,2], "key2" : {"hasha": 2}}
- b = {"key2" : {"hashb": 1}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2], "key2": {"hasha": 2, "hashb": 1}})
-
- def test_merge_recursive_1(self):
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
- def test_merge_recursive_array_item(self):
- a = {"a" : {"b": {"c": [1]}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
- def test_merge_recursive_hash_item(self):
- a = {"a" : {"b": {"c": {"d": 1}}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
-
- def test_merge_recursive_array_hash(self):
- a = {"a" : [{"b": {"c": 1}}]}
- b = {"a" : {"b": {"c": 1}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def test_merge_recursive_hash_array(self):
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : [{"b": {"c": 1}}]}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def tearDown(self):
- pass
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index ffcfc5db2..d3ee8c51e 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -385,7 +385,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
dir_okay=True,
readable=True),
# callback=validate_ansible_dir,
- default='/usr/share/openshift-ansible/',
+ default='/usr/share/ansible/openshift-ansible/',
envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-config',
type=click.Path(file_okay=True,