26 files changed, 1014 insertions, 520 deletions
diff --git a/ansible.cfg b/ansible.cfg.example
index 6a7722ad8..6a7722ad8 100644
--- a/ansible.cfg
+++ b/ansible.cfg.example
diff --git a/bin/ohi b/bin/ohi
--- a/bin/ohi
+++ b/bin/ohi
@@ -17,13 +17,10 @@ from openshift_ansible.awsutil import ArgumentError

CONFIG_MAIN_SECTION = 'main'
CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
-CONFIG_INVENTORY_OPTION = 'inventory'
-

class Ohi(object):
    def __init__(self):
-        self.inventory = None
        self.host_type_aliases = {}

        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
@@ -35,7 +32,7 @@ class Ohi(object):
        self.parse_cli_args()
        self.parse_config_file()

-        self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+        self.aws = awsutil.AwsUtil(self.host_type_aliases)

    def run(self):
        if self.args.list_host_types:
@@ -47,12 +44,12 @@ class Ohi(object):
           self.args.env is not None:
            # Both env and host-type specified
            hosts = self.aws.get_host_list(host_type=self.args.host_type, \
-                                           env=self.args.env)
+                                           envs=self.args.env)

        if self.args.host_type is None and \
           self.args.env is not None:
            # Only env specified
-            hosts = self.aws.get_host_list(env=self.args.env)
+            hosts = self.aws.get_host_list(envs=self.args.env)

        if self.args.host_type is not None and \
           self.args.env is None:
@@ -76,10 +73,6 @@ class Ohi(object):
        config = ConfigParser.ConfigParser()
        config.read(self.config_path)

-        if config.has_section(CONFIG_MAIN_SECTION) and \
-           config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
-            self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
-
        self.host_type_aliases = {}
        if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
            for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index 29aaff9ae..884d4eb0a 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -1,6 +1,6 @@
Summary: OpenShift Ansible Scripts for working with metadata hosts
Name: openshift-ansible-bin
-Version: 0.0.12
+Version: 0.0.17
Release: 1%{?dist}
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -24,7 +24,13 @@ mkdir -p %{buildroot}/etc/bash_completion.d
mkdir -p %{buildroot}/etc/openshift_ansible

cp -p ossh oscp opssh opscp ohi %{buildroot}%{_bindir}
-cp -p openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -pP openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+
+# Make it so we can load multi_ec2.py as a library.
+rm %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py*
+ln -sf /usr/share/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf /usr/share/ansible/inventory/multi_ec2.pyc %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.pyc
+
cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
@@ -36,6 +42,15 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
%config(noreplace) /etc/openshift_ansible/

%changelog
+* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.17-1
+- fixed the openshift-ansible-bin build (twiest@redhat.com)
+
+* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
+- Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
+- Adding cache location for multi ec2 (kwoodson@redhat.com)
+
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.13-1
+- added '-e all' to ohi and fixed pylint errors. (twiest@redhat.com)
+
* Tue May 05 2015 Thomas Wiest <twiest@redhat.com> 0.0.12-1
- fixed opssh and opscp to allow just environment or just host-type.
  (twiest@redhat.com)
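Usage note: the symlink installed above makes the shipped inventory script importable as a regular Python module, which is what lets ossh/oscp/ohi stop shelling out to it. A minimal sketch of the resulting import path (hypothetical interactive use, assuming the RPM is installed so the symlink resolves):

    # MultiEc2 and its result attribute are defined in
    # inventory/multi_ec2.py further down this diff.
    from openshift_ansible import multi_ec2

    mec2 = multi_ec2.MultiEc2()  # library-style: empty args dict
    mec2.run()                   # serves from cache while it is still valid
    inventory = mec2.result      # merged inventory hash across accounts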
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 65b269930..9df034f57 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -1,113 +1,120 @@
# vim: expandtab:tabstop=4:shiftwidth=4
-import subprocess
+"""This module comprises Aws specific utility functions."""
+
import os
-import json
import re
+from openshift_ansible import multi_ec2

class ArgumentError(Exception):
+    """This class is raised when improper arguments are passed."""
+
    def __init__(self, message):
+        """Initialize an ArgumentError.
+
+        Keyword arguments:
+        message -- the exact error message being raised
+        """
+        super(ArgumentError, self).__init__()
        self.message = message

class AwsUtil(object):
-    def __init__(self, inventory_path=None, host_type_aliases={}):
-        self.host_type_aliases = host_type_aliases
-        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+    """This class contains the AWS utility functions."""

-        if inventory_path is None:
-            inventory_path = os.path.realpath(os.path.join(self.file_path, \
-                                              '..', '..', 'inventory', \
-                                              'multi_ec2.py'))
+    def __init__(self, host_type_aliases=None):
+        """Initialize the AWS utility class.

-        if not os.path.isfile(inventory_path):
-            raise Exception("Inventory file not found [%s]" % inventory_path)
+        Keyword arguments:
+        host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
+        """
+
+        host_type_aliases = host_type_aliases or {}
+
+        self.host_type_aliases = host_type_aliases
+        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))

-        self.inventory_path = inventory_path
        self.setup_host_type_alias_lookup()

    def setup_host_type_alias_lookup(self):
+        """Sets up the alias to host-type lookup table."""
        self.alias_lookup = {}
        for key, values in self.host_type_aliases.iteritems():
            for value in values:
                self.alias_lookup[value] = key

+    @staticmethod
+    def get_inventory(args=None):
+        """Calls the inventory script and returns a dictionary containing the inventory.
-
-    def get_inventory(self,args=[]):
-        cmd = [self.inventory_path]
-
-        if args:
-            cmd.extend(args)
-
-        env = os.environ
-
-        p = subprocess.Popen(cmd, stderr=subprocess.PIPE,
-                             stdout=subprocess.PIPE, env=env)
-
-        out,err = p.communicate()
-
-        if p.returncode != 0:
-            raise RuntimeError(err)
-
-        return json.loads(out.strip())
+
+        Keyword arguments:
+        args -- optional arguments to pass to the inventory script
+        """
+        mec2 = multi_ec2.MultiEc2(args)
+        mec2.run()
+        return mec2.result

    def get_environments(self):
+        """Searches for env tags in the inventory and returns all of the envs found."""
        pattern = re.compile(r'^tag_environment_(.*)')

        envs = []
        inv = self.get_inventory()
        for key in inv.keys():
-            m = pattern.match(key)
-            if m:
-                envs.append(m.group(1))
+            matched = pattern.match(key)
+            if matched:
+                envs.append(matched.group(1))

        envs.sort()
        return envs

    def get_host_types(self):
+        """Searches for host-type tags in the inventory and returns all host-types found."""
        pattern = re.compile(r'^tag_host-type_(.*)')

        host_types = []
        inv = self.get_inventory()
        for key in inv.keys():
-            m = pattern.match(key)
-            if m:
-                host_types.append(m.group(1))
+            matched = pattern.match(key)
+            if matched:
+                host_types.append(matched.group(1))

        host_types.sort()
        return host_types

    def get_security_groups(self):
+        """Searches for security_groups in the inventory and returns all SGs found."""
        pattern = re.compile(r'^security_group_(.*)')

        groups = []
        inv = self.get_inventory()
        for key in inv.keys():
-            m = pattern.match(key)
-            if m:
-                groups.append(m.group(1))
+            matched = pattern.match(key)
+            if matched:
+                groups.append(matched.group(1))

        groups.sort()
        return groups

-    def build_host_dict_by_env(self, args=[]):
+    def build_host_dict_by_env(self, args=None):
+        """Searches the inventory for hosts in an env and returns their hostvars."""
+        args = args or []
        inv = self.get_inventory(args)

        inst_by_env = {}
-        for dns, host in inv['_meta']['hostvars'].items():
+        for _, host in inv['_meta']['hostvars'].items():
            # If you don't have an environment tag, we're going to ignore you
            if 'ec2_tag_environment' not in host:
                continue

            if host['ec2_tag_environment'] not in inst_by_env:
                inst_by_env[host['ec2_tag_environment']] = {}
-            host_id = "%s:%s" % (host['ec2_tag_Name'],host['ec2_id'])
+            host_id = "%s:%s" % (host['ec2_tag_Name'], host['ec2_id'])
            inst_by_env[host['ec2_tag_environment']][host_id] = host

        return inst_by_env

-    # Display host_types
    def print_host_types(self):
+        """Gets the list of host types and aliases and outputs them in columns."""
        host_types = self.get_host_types()
        ht_format_str = "%35s"
        alias_format_str = "%-20s"
@@ -117,22 +124,31 @@ class AwsUtil(object):
        print combined_format_str % ('Host Types', 'Aliases')
        print combined_format_str % ('----------', '-------')

-        for ht in host_types:
+        for host_type in host_types:
            aliases = []
-            if ht in self.host_type_aliases:
-                aliases = self.host_type_aliases[ht]
-                print combined_format_str % (ht, ", ".join(aliases))
+            if host_type in self.host_type_aliases:
+                aliases = self.host_type_aliases[host_type]
+                print combined_format_str % (host_type, ", ".join(aliases))
            else:
-                print ht_format_str % ht
+                print ht_format_str % host_type
        print

-    # Convert host-type aliases to real a host-type
    def resolve_host_type(self, host_type):
+        """Converts a host-type alias into a host-type.
+
+        Keyword arguments:
+        host_type -- The alias or host_type to look up.
+
+        Example (depends on aliases defined in config file):
+            host_type = ex-node
+            returns: openshift-node
+        """
        if self.alias_lookup.has_key(host_type):
            return self.alias_lookup[host_type]
        return host_type

-    def gen_env_tag(self, env):
+    @staticmethod
+    def gen_env_tag(env):
        """Generate the environment tag
        """
        return "tag_environment_%s" % env
@@ -149,28 +165,44 @@ class AwsUtil(object):
        host_type = self.resolve_host_type(host_type)
        return "tag_env-host-type_%s-%s" % (env, host_type)

-    def get_host_list(self, host_type=None, env=None):
+    def get_host_list(self, host_type=None, envs=None):
        """Get the list of hosts from the inventory using host-type and environment
        """
+        envs = envs or []
        inv = self.get_inventory()

-        if host_type is not None and \
-           env is not None:
-            # Both host type and environment were specified
-            env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
-            return inv[env_host_type_tag]
+        # We prefer to deal with a list of environments
+        if issubclass(type(envs), basestring):
+            if envs == 'all':
+                envs = self.get_environments()
+            else:
+                envs = [envs]

-        if host_type is None and \
-           env is not None:
+        if host_type and envs:
+            # Both host type and environment were specified
+            retval = []
+            for env in envs:
+                env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
+                if env_host_type_tag in inv.keys():
+                    retval += inv[env_host_type_tag]
+            return set(retval)
+
+        if envs and not host_type:
            # Just environment was specified
-            host_type_tag = self.gen_env_tag(env)
-            return inv[host_type_tag]
-
-        if host_type is not None and \
-           env is None:
+            retval = []
+            for env in envs:
+                env_tag = AwsUtil.gen_env_tag(env)
+                if env_tag in inv.keys():
+                    retval += inv[env_tag]
+            return set(retval)
+
+        if host_type and not envs:
            # Just host-type was specified
+            retval = []
            host_type_tag = self.gen_host_type_tag(host_type)
-            return inv[host_type_tag]
+            if host_type_tag in inv.keys():
+                retval = inv[host_type_tag]
+            return set(retval)

        # We should never reach here!
        raise ArgumentError("Invalid combination of parameters")
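Usage note: get_host_list() now accepts envs as a single environment name, a list of names, or the literal 'all', and returns a set rather than a raw inventory list. A rough sketch of the new call shapes (the alias map here is invented for illustration):

    from openshift_ansible import awsutil

    aws = awsutil.AwsUtil({'openshift-node': ['ex-node']})  # hypothetical aliases
    aws.get_host_list(host_type='ex-node', envs='int')           # one env
    aws.get_host_list(host_type='ex-node', envs=['int', 'stg'])  # several envs, deduped
    aws.get_host_list(host_type='ex-node', envs='all')           # every env in inventory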
diff --git a/bin/openshift_ansible/multi_ec2.py b/bin/openshift_ansible/multi_ec2.py
new file mode 120000
index 000000000..660a0418e
--- /dev/null
+++ b/bin/openshift_ansible/multi_ec2.py
@@ -0,0 +1 @@
+../../inventory/multi_ec2.py
\ No newline at end of file
diff --git a/bin/oscp b/bin/oscp
--- a/bin/oscp
+++ b/bin/oscp
@@ -11,11 +11,9 @@ import ConfigParser
from openshift_ansible import awsutil

CONFIG_MAIN_SECTION = 'main'
-CONFIG_INVENTORY_OPTION = 'inventory'

class Oscp(object):
    def __init__(self):
-        self.inventory = None
        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))

        # Default the config path to /etc
@@ -29,13 +27,13 @@ class Oscp(object):
        # parse host and user
        self.process_host()

-        self.aws = awsutil.AwsUtil(self.inventory)
+        self.aws = awsutil.AwsUtil()

        # get a dict of host inventory
-        if self.args.list:
-            self.get_hosts()
-        else:
+        if self.args.refresh_cache:
            self.get_hosts(True)
+        else:
+            self.get_hosts()

        if (self.args.src == '' or self.args.dest == '') and not self.args.list:
            self.parser.print_help()
@@ -56,10 +54,6 @@ class Oscp(object):
        config = ConfigParser.ConfigParser()
        config.read(self.config_path)

-        if config.has_section(CONFIG_MAIN_SECTION) and \
-           config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
-            self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
-
    def parse_cli_args(self):
        parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
        parser.add_argument('-e', '--env',
@@ -68,6 +62,8 @@ class Oscp(object):
                            action="store_true", help="debug mode")
        parser.add_argument('-v', '--verbose', default=False,
                            action="store_true", help="Verbose?")
+        parser.add_argument('--refresh-cache', default=False,
+                            action="store_true", help="Force a refresh on the host cache.")
        parser.add_argument('--list', default=False,
                            action="store_true", help="list out hosts")
        parser.add_argument('-r', '--recurse', action='store_true', default=False,
@@ -119,14 +115,14 @@ class Oscp(object):
        else:
            self.env = None

-    def get_hosts(self, cache_only=False):
+    def get_hosts(self, refresh_cache=False):
        '''Query our host inventory and return a dict where the format
           equals:

           dict['environment'] = [{'servername' : {}}, ]
        '''
-        if cache_only:
-            self.host_inventory = self.aws.build_host_dict_by_env(['--cache-only'])
+        if refresh_cache:
+            self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
        else:
            self.host_inventory = self.aws.build_host_dict_by_env()
diff --git a/bin/ossh b/bin/ossh
--- a/bin/ossh
+++ b/bin/ossh
@@ -11,11 +11,9 @@ import ConfigParser
from openshift_ansible import awsutil

CONFIG_MAIN_SECTION = 'main'
-CONFIG_INVENTORY_OPTION = 'inventory'

class Ossh(object):
    def __init__(self):
-        self.inventory = None
        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))

        # Default the config path to /etc
@@ -26,13 +24,12 @@ class Ossh(object):
        self.parse_cli_args()
        self.parse_config_file()

-        self.aws = awsutil.AwsUtil(self.inventory)
+        self.aws = awsutil.AwsUtil()

-        # get a dict of host inventory
-        if self.args.list:
-            self.get_hosts()
-        else:
+        if self.args.refresh_cache:
            self.get_hosts(True)
+        else:
+            self.get_hosts()

        # parse host and user
        self.process_host()
@@ -55,10 +52,6 @@ class Ossh(object):
        config = ConfigParser.ConfigParser()
        config.read(self.config_path)

-        if config.has_section(CONFIG_MAIN_SECTION) and \
-           config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
-            self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
-
    def parse_cli_args(self):
        parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
        parser.add_argument('-e', '--env', action="store",
@@ -67,6 +60,8 @@ class Ossh(object):
                            action="store_true", help="debug mode")
        parser.add_argument('-v', '--verbose', default=False,
                            action="store_true", help="Verbose?")
+        parser.add_argument('--refresh-cache', default=False,
+                            action="store_true", help="Force a refresh on the host cache.")
        parser.add_argument('--list', default=False,
                            action="store_true", help="list out hosts")
        parser.add_argument('-c', '--command', action='store',
@@ -109,14 +104,14 @@ class Ossh(object):
        if self.args.login_name:
            self.user = self.args.login_name

-    def get_hosts(self, cache_only=False):
+    def get_hosts(self, refresh_cache=False):
        '''Query our host inventory and return a dict where the format
           equals:

           dict['servername'] = dns_name
        '''
-        if cache_only:
-            self.host_inventory = self.aws.build_host_dict_by_env(['--cache-only'])
+        if refresh_cache:
+            self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
        else:
            self.host_inventory = self.aws.build_host_dict_by_env()
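Usage note: both tools invert the old cache handling here: the default path now serves from the cache and --refresh-cache forces a re-pull, replacing the removed --cache-only flow. Sketched in terms of the methods above:

    # What ossh/oscp --refresh-cache boils down to:
    aws = awsutil.AwsUtil()
    aws.build_host_dict_by_env(['--refresh-cache'])  # force a fresh pull
    aws.build_host_dict_by_env()                     # default: cached inventory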
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
index 98dbb4fd8..728eec8aa 100644
--- a/inventory/byo/hosts
+++ b/inventory/byo/hosts
@@ -20,7 +20,8 @@ deployment_type=enterprise
openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}

# Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]

# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
@@ -31,4 +32,5 @@ ose3-master-ansible.test.example.com

# host group for nodes
[nodes]
-ose3-node[1:2]-ansible.test.example.com
+ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index b839a33ea..f8196aefd 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -11,9 +11,13 @@ import yaml
import os
import subprocess
import json
-
+import errno
+import fcntl
+import tempfile
+import copy

CONFIG_FILE_NAME = 'multi_ec2.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')

class MultiEc2(object):
    '''
@@ -22,12 +26,17 @@ class MultiEc2(object):
       Stores a json hash of resources in result.
    '''

-    def __init__(self):
-        self.args = None
+    def __init__(self, args=None):
+        # Allow args to be passed when called as a library
+        if not args:
+            self.args = {}
+        else:
+            self.args = args
+
+        self.cache_path = DEFAULT_CACHE_PATH
        self.config = None
        self.all_ec2_results = {}
        self.result = {}
-        self.cache_path = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))

        same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
@@ -41,17 +50,26 @@ class MultiEc2(object):
        else:
            self.config_file = None # expect env vars

-        self.parse_cli_args()
+    def run(self):
+        '''This method checks to see if the local
+           cache is valid for the inventory.
+
+           if the cache is valid; return cache
+           else the credentials are loaded from multi_ec2.yaml or from the env
+           and we attempt to get the inventory from the provider specified.
+        '''
        # load yaml
        if self.config_file and os.path.isfile(self.config_file):
            self.config = self.load_yaml_config()
        elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
             os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+            # Build a default config
            self.config = {}
            self.config['accounts'] = [
                {
                    'name': 'default',
+                    'cache_location': DEFAULT_CACHE_PATH,
                    'provider': 'aws/hosts/ec2.py',
                    'env_vars': {
                        'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
@@ -64,11 +82,15 @@ class MultiEc2(object):
        else:
            raise RuntimeError("Could not find valid ec2 credentials in the environment.")

-        if self.args.refresh_cache:
+        # Set the default cache path but if its defined we'll assign it.
+        if self.config.has_key('cache_location'):
+            self.cache_path = self.config['cache_location']
+
+        if self.args.get('refresh_cache', None):
            self.get_inventory()
            self.write_to_cache()
        # if its a host query, fetch and do not cache
-        elif self.args.host:
+        elif self.args.get('host', None):
            self.get_inventory()
        elif not self.is_cache_valid():
            # go fetch the inventories and cache them if cache is expired
@@ -109,9 +131,9 @@ class MultiEc2(object):
                               "and that it is executable. (%s)" % provider)

        cmds = [provider]
-        if self.args.host:
+        if self.args.get('host', None):
            cmds.append("--host")
-            cmds.append(self.args.host)
+            cmds.append(self.args.get('host', None))
        else:
            cmds.append('--list')

@@ -119,6 +141,54 @@ class MultiEc2(object):
        return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
                                stdout=subprocess.PIPE, env=env)

+    @staticmethod
+    def generate_config(config_data):
+        """Generate the ec2.ini file in as a secure temp file.
+           Once generated, pass it to the ec2.py as an environment variable.
+        """
+        fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
+        for section, values in config_data.items():
+            os.write(fildes, "[%s]\n" % section)
+            for option, value in values.items():
+                os.write(fildes, "%s = %s\n" % (option, value))
+        os.close(fildes)
+        return tmp_file_path
+
+    def run_provider(self):
+        '''Setup the provider call with proper variables
+           and call self.get_provider_tags.
+        '''
+        try:
+            all_results = []
+            tmp_file_paths = []
+            processes = {}
+            for account in self.config['accounts']:
+                env = account['env_vars']
+                if account.has_key('provider_config'):
+                    tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
+                    env['EC2_INI_PATH'] = tmp_file_paths[-1]
+                name = account['name']
+                provider = account['provider']
+                processes[name] = self.get_provider_tags(provider, env)
+
+            # for each process collect stdout when its available
+            for name, process in processes.items():
+                out, err = process.communicate()
+                all_results.append({
+                    "name": name,
+                    "out": out.strip(),
+                    "err": err.strip(),
+                    "code": process.returncode
+                })
+
+        finally:
+            # Clean up the mkstemp file
+            for tmp_file in tmp_file_paths:
+                os.unlink(tmp_file)
+
+        return all_results
+
    def get_inventory(self):
        """Create the subprocess to fetch tags from a provider.
           Host query:
@@ -129,46 +199,61 @@ class MultiEc2(object):
           Query all of the different accounts for their tags.
           Once completed store all of their results into one merged updated hash.
        """
-        processes = {}
-        for account in self.config['accounts']:
-            env = account['env_vars']
-            name = account['name']
-            provider = account['provider']
-            processes[name] = self.get_provider_tags(provider, env)
-
-        # for each process collect stdout when its available
-        all_results = []
-        for name, process in processes.items():
-            out, err = process.communicate()
-            all_results.append({
-                "name": name,
-                "out": out.strip(),
-                "err": err.strip(),
-                "code": process.returncode
-            })
+        provider_results = self.run_provider()

        # process --host results
-        if not self.args.host:
+        # For any 0 result, return it
+        if self.args.get('host', None):
+            count = 0
+            for results in provider_results:
+                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
+                    self.result = json.loads(results['out'])
+                    count += 1
+                if count > 1:
+                    raise RuntimeError("Found > 1 results for --host %s. \
+                                       This is an invalid state." % self.args.get('host', None))
+        # process --list results
+        else:
            # For any non-zero, raise an error on it
-            for result in all_results:
+            for result in provider_results:
                if result['code'] != 0:
                    raise RuntimeError(result['err'])
                else:
                    self.all_ec2_results[result['name']] = json.loads(result['out'])
+
+            # Check if user wants extra vars in yaml by
+            # having hostvars and all_group defined
+            for acc_config in self.config['accounts']:
+                self.apply_account_config(acc_config)
+
+            # Build results by merging all dictionaries
            values = self.all_ec2_results.values()
            values.insert(0, self.result)
            for result in values:
                MultiEc2.merge_destructively(self.result, result)
-        else:
-            # For any 0 result, return it
-            count = 0
-            for results in all_results:
-                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
-                    self.result = json.loads(out)
-                    count += 1
-                if count > 1:
-                    raise RuntimeError("Found > 1 results for --host %s. \
-                                       This is an invalid state." % self.args.host)
+
+    def apply_account_config(self, acc_config):
+        ''' Apply account config settings
+        '''
+        if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'):
+            return
+
+        results = self.all_ec2_results[acc_config['name']]
+        # Update each hostvar with the newly desired key: value
+        for host_property, value in acc_config['hostvars'].items():
+            # Verify the account results look sane
+            # by checking for these keys ('_meta' and 'hostvars' exist)
+            if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
+                for data in results['_meta']['hostvars'].values():
+                    data[str(host_property)] = str(value)
+
+            # Add this group
+            results["%s_%s" % (host_property, value)] = \
+                copy.copy(results[acc_config['all_group']])
+
+        # store the results back into all_ec2_results
+        self.all_ec2_results[acc_config['name']] = results
+
    @staticmethod
    def merge_destructively(input_a, input_b):
        "merges b into input_a"
@@ -182,7 +267,7 @@ class MultiEc2(object):
            elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
                for result in input_b[key]:
                    if result not in input_a[key]:
-                        input_a[key].input_append(result)
+                        input_a[key].append(result)
            # a is a list and not b
            elif isinstance(input_a[key], list):
                if input_b[key] not in input_a[key]:
@@ -217,14 +302,27 @@ class MultiEc2(object):
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store', default=False,
                            help='Get all the variables about a specific instance')
-        self.args = parser.parse_args()
+        self.args = parser.parse_args().__dict__

    def write_to_cache(self):
        ''' Writes data in JSON format to a file '''
+        # if it does not exist, try and create it.
+        if not os.path.isfile(self.cache_path):
+            path = os.path.dirname(self.cache_path)
+            try:
+                os.makedirs(path)
+            except OSError as exc:
+                if exc.errno != errno.EEXIST or not os.path.isdir(path):
+                    raise
+
        json_data = MultiEc2.json_format_dict(self.result, True)
        with open(self.cache_path, 'w') as cache:
-            cache.write(json_data)
+            try:
+                fcntl.flock(cache, fcntl.LOCK_EX)
+                cache.write(json_data)
+            finally:
+                fcntl.flock(cache, fcntl.LOCK_UN)

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON
@@ -254,4 +352,7 @@ class MultiEc2(object):


if __name__ == "__main__":
-    print MultiEc2().result_str()
+    MEC2 = MultiEc2()
+    MEC2.parse_cli_args()
+    MEC2.run()
+    print MEC2.result_str()
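Usage note: with parse_cli_args() moved out of __init__, MultiEc2 now has two entry styles, and args is a plain dict either way. A hedged sketch of both:

    # CLI style (what the __main__ block above now does):
    mec2 = MultiEc2()
    mec2.parse_cli_args()
    mec2.run()
    print mec2.result_str()

    # Library style, e.g. forcing a cache refresh:
    mec2 = MultiEc2({'refresh_cache': True})
    mec2.run()
    inventory = mec2.result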
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
index 91e7c7970..99f157b11 100644
--- a/inventory/multi_ec2.yaml.example
+++ b/inventory/multi_ec2.yaml.example
@@ -1,15 +1,32 @@
# multi ec2 inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
+
accounts:
- name: aws1
  provider: aws/hosts/ec2.py
+  provider_config:
+    ec2:
+      regions: all
+      regions_exclude: us-gov-west-1,cn-north-1
+      destination_variable: public_dns_name
+      route53: False
+      cache_path: ~/.ansible/tmp
+      cache_max_age: 300
+      vpc_destination_variable: ip_address
  env_vars:
    AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
    AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+  all_group: ec2
+  hostvars:
+    cloud: aws
+    account: aws1

-  - name: aws2
+- name: aws2
  provider: aws/hosts/ec2.py
  env_vars:
    AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
    AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+    EC2_INI_PATH: /etc/ansible/ec2.ini

cache_max_age: 60
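Usage note: each account's provider_config section is what generate_config() flattens into a temporary ec2.ini, which then reaches ec2.py through EC2_INI_PATH. A small sketch of that round trip (values taken from the aws1 example above; option order may vary):

    import os
    # Writes a mkstemp file named multi_ec2.ini.XXXXXX and returns its path:
    ini_path = MultiEc2.generate_config({'ec2': {'regions': 'all',
                                                 'cache_max_age': 300}})
    # The file now contains:
    #   [ec2]
    #   regions = all
    #   cache_max_age = 300
    os.unlink(ini_path)  # run_provider() does this cleanup in its finally block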
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index 8267e16f6..cd2332549 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -1,6 +1,6 @@
Summary: OpenShift Ansible Inventories
Name: openshift-ansible-inventory
-Version: 0.0.2
+Version: 0.0.7
Release: 1%{?dist}
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -25,18 +25,39 @@ mkdir -p %{buildroot}/usr/share/ansible/inventory/gce

cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/ec2.py aws/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
-cp -p gce/gce.py %{buildroot}/usr/share/ansible/inventory/gce
+cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws
+cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce

%files
%config(noreplace) /etc/ansible/*
%dir /usr/share/ansible/inventory
/usr/share/ansible/inventory/multi_ec2.py*
/usr/share/ansible/inventory/aws/ec2.py*
-%config(noreplace) /usr/share/ansible/inventory/aws/ec2.ini
/usr/share/ansible/inventory/gce/gce.py*

%changelog
+* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
+- Making multi_ec2 into a library (kwoodson@redhat.com)
+
+* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
+- Added support for grouping and a bug fix. (kwoodson@redhat.com)
+
+* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
+- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
+  not dictating what the ec2.ini file should look like. (twiest@redhat.com)
+- Added capability to pass in ec2.ini file. (kwoodson@redhat.com)
+
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
+- Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
+
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
+- fixed build problems with openshift-ansible-inventory.spec
+  (twiest@redhat.com)
+- Allow option in multi_ec2 to set cache location. (kwoodson@redhat.com)
+- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
+- Adding refresh-cache option and cleanup for pylint. Also updated for
+  aws/hosts/ being added. (kwoodson@redhat.com)
+
* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
- added the ability to have a config file in /etc/openshift_ansible to
  multi_ec2.py. (twiest@redhat.com)
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
index 56235bc8a..c23bda3a0 100644
--- a/playbooks/aws/ansible-tower/launch.yml
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -6,7 +6,7 @@
  vars:
    inst_region: us-east-1
-    rhel7_ami: ami-906240f8
+    rhel7_ami: ami-78756d10
    user_data_file: user_data.txt

  vars_files:
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
index 12f79a9c1..e115615d5 100644
--- a/playbooks/aws/openshift-cluster/vars.online.int.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -1,5 +1,5 @@
---
-ec2_image: ami-906240f8
+ec2_image: ami-78756d10
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
index 12f79a9c1..e115615d5 100644
--- a/playbooks/aws/openshift-cluster/vars.online.prod.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -1,5 +1,5 @@
---
-ec2_image: ami-906240f8
+ec2_image: ami-78756d10
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
index 12f79a9c1..e115615d5 100644
--- a/playbooks/aws/openshift-cluster/vars.online.stage.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -1,5 +1,5 @@
---
-ec2_image: ami-906240f8
+ec2_image: ami-78756d10
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
index dce49d32f..e059514db 100644
--- a/playbooks/byo/config.yml
+++ b/playbooks/byo/config.yml
@@ -1,6 +1,8 @@
---
- name: Run the openshift-master config playbook
  include: openshift-master/config.yml
+  when: groups.masters is defined and groups.masters

- name: Run the openshift-node config playbook
  include: openshift-node/config.yml
+  when: groups.nodes is defined and groups.nodes and groups.masters is defined and groups.masters
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 433cfeb87..96641a274 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -15,6 +15,7 @@
        local_facts:
          hostname: "{{ openshift_hostname | default(None) }}"
          public_hostname: "{{ openshift_public_hostname | default(None) }}"
+          deployment_type: "{{ openshift_deployment_type }}"
      - role: node
        local_facts:
          external_id: "{{ openshift_node_external_id | default(None) }}"
@@ -23,7 +24,6 @@
          pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
          labels: "{{ openshift_node_labels | default(None) }}"
          annotations: "{{ openshift_node_annotations | default(None) }}"
-          deployment_type: "{{ openshift_deployment_type }}"

- name: Create temp directory for syncing certs
@@ -68,7 +68,6 @@
    fetch:
      src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
      dest: "{{ sync_tmpdir }}/"
-      flat: yes
      fail_on_missing: yes
      validate_checksum: yes
    with_items: openshift_nodes
@@ -79,7 +78,7 @@
  hosts: oo_nodes_to_config
  gather_facts: no
  vars:
-    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}/{{ groups['oo_first_master'][0] }}/{{ hostvars.localhost.mktemp.stdout }}"
    openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
  pre_tasks:
  - name: Ensure certificate directory exists
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
index 8a9624397..de9bb5157 100644
--- a/rel-eng/packages/openshift-ansible-bin
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -1 +1 @@
-0.0.12-1 bin/
+0.0.17-1 bin/
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
index cf3ac87ed..df529d9fd 100644
--- a/rel-eng/packages/openshift-ansible-inventory
+++ b/rel-eng/packages/openshift-ansible-inventory
@@ -1 +1 @@
-0.0.2-1 inventory/
+0.0.7-1 inventory/
diff --git a/roles/ansible/tasks/config.yml b/roles/ansible/tasks/config.yml
new file mode 100644
index 000000000..5e361429b
--- /dev/null
+++ b/roles/ansible/tasks/config.yml
@@ -0,0 +1,8 @@
+---
+- name: modify ansible.cfg
+  lineinfile:
+    dest: /etc/ansible/ansible.cfg
+    backrefs: yes
+    regexp: "^#?({{ item.option }})( *)="
+    line: '\1\2= {{ item.value }}'
+  with_items: cfg_options
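Usage note: because backrefs is set, lineinfile rewrites the whole matched line from the `line` expression, so the option name and its padding survive and the optional leading '#' is dropped, uncommenting defaults in place. The same substitution in plain Python (option and value invented):

    import re
    regexp = re.compile(r'^#?(forks)( *)=')
    match = regexp.match('#forks          = 5')
    if match:
        new_line = match.expand(r'\1\2= 50')
    # new_line == 'forks          = 50'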
diff --git a/roles/ansible/tasks/main.yaml b/roles/ansible/tasks/main.yml
index 67a04b919..5d20a3b35 100644
--- a/roles/ansible/tasks/main.yaml
+++ b/roles/ansible/tasks/main.yml
@@ -5,3 +5,7 @@
  yum:
    pkg: ansible
    state: installed
+
+- include: config.yml
+  vars:
+    cfg_options: "{{ ans_config }}"
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index dddfe24e3..5fe77e38b 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -24,22 +24,20 @@
    owner: root
    group: libra_ops

-- lineinfile:
-    dest: /etc/ansible/ansible.cfg
-    backrefs: yes
-    regexp: '^(hostfile|inventory)( *)='
-    line: '\1\2= /etc/ansible/inventory'
+# This cron uses the above location to call its job
+- name: Cron to keep cache fresh
+  cron:
+    name: 'multi_ec2_inventory'
+    minute: '*/10'
+    job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+  when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache

-- name: setting ec2.ini destination_format
-  lineinfile:
-    dest: /usr/share/ansible/inventory/aws/ec2.ini
-    regexp: '^destination_format *='
-    line: "destination_format = {{ oo_ec2_destination_format }}"
-  when: oo_ec2_destination_format is defined
-
-- name: setting ec2.ini destination_format_tags
-  lineinfile:
-    dest: /usr/share/ansible/inventory/aws/ec2.ini
-    regexp: '^destination_format_tags *='
-    line: "destination_format_tags = {{ oo_ec2_destination_format_tags }}"
-  when: oo_ec2_destination_format_tags is defined
+- name: Set cache location
+  file:
+    state: directory
+    dest: "{{ oo_inventory_cache_location | dirname }}"
+    owner: root
+    group: libra_ops
+    recurse: yes
+    mode: '2750'
+  when: oo_inventory_cache_location is defined
diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
index 23dfe73b8..8228ab915 100644
--- a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
+++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
@@ -1,11 +1,26 @@
# multi ec2 inventory configs
cache_max_age: {{ oo_inventory_cache_max_age }}
+cache_location: {{ oo_inventory_cache_location | default('~/.ansible/tmp/multi_ec2_inventory.cache') }}
accounts:
{% for account in oo_inventory_accounts %}
- name: {{ account.name }}
  provider: {{ account.provider }}
+  provider_config:
+{% for section, items in account.provider_config.items() %}
+    {{ section }}:
+{% for property, value in items.items() %}
+      {{ property }}: {{ value }}
+{% endfor %}
+{% endfor %}
  env_vars:
    AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
    AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
+{% if account.all_group is defined and account.hostvars is defined %}
+  all_group: {{ account.all_group }}
+  hostvars:
+{% for property, value in account.hostvars.items() %}
+    {{ property }}: {{ value }}
+{% endfor %}
+{% endif %}
{% endfor %}
- facts = self.merge_facts(facts, local_facts) - facts['current_config'] = self.current_config(facts) - self.set_url_facts_if_unset(facts) - return dict(openshift=facts) + Args: + hostnames (list): list of hostnames + fallback (str): default value to set if hostnames does not contain + a valid hostname + Returns: + str: chosen hostname + """ + hostname = fallback + if hostnames is None: + return hostname + + ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z' + ips = [i for i in hostnames + if (i is not None and isinstance(i, basestring) + and re.match(ip_regex, i))] + hosts = [i for i in hostnames + if i is not None and i != '' and i not in ips] + + for host_list in (hosts, ips): + for host in host_list: + if hostname_valid(host): + return host + + return hostname + + +def query_metadata(metadata_url, headers=None, expect_json=False): + """ Return metadata from the provided metadata_url + + Args: + metadata_url (str): metadata url + headers (dict): headers to set for metadata request + expect_json (bool): does the metadata_url return json + Returns: + dict or list: metadata request result + """ + result, info = fetch_url(module, metadata_url, headers=headers) + if info['status'] != 200: + raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable") + if expect_json: + return module.from_json(result.read()) + else: + return [line.strip() for line in result.readlines()] + + +def walk_metadata(metadata_url, headers=None, expect_json=False): + """ Walk the metadata tree and return a dictionary of the entire tree + + Args: + metadata_url (str): metadata url + headers (dict): headers to set for metadata request + expect_json (bool): does the metadata_url return json + Returns: + dict: the result of walking the metadata tree + """ + metadata = dict() + + for line in query_metadata(metadata_url, headers, expect_json): + if line.endswith('/') and not line == 'public-keys/': + key = line[:-1] + metadata[key] = walk_metadata(metadata_url + line, + headers, expect_json) + else: + results = query_metadata(metadata_url + line, headers, + expect_json) + if len(results) == 1: + # disable pylint maybe-no-member because overloaded use of + # the module name causes pylint to not detect that results + # is an array or hash + # pylint: disable=maybe-no-member + metadata[line] = results.pop() + else: + metadata[line] = results + return metadata - def set_url_facts_if_unset(self, facts): - if 'master' in facts: - for (url_var, use_ssl, port, default) in [ - ('api_url', - facts['master']['api_use_ssl'], - facts['master']['api_port'], - facts['common']['hostname']), - ('public_api_url', - facts['master']['api_use_ssl'], - facts['master']['api_port'], - facts['common']['public_hostname']), - ('console_url', - facts['master']['console_use_ssl'], - facts['master']['console_port'], - facts['common']['hostname']), - ('public_console_url' 'console_use_ssl', - facts['master']['console_use_ssl'], - facts['master']['console_port'], - facts['common']['public_hostname'])]: - if url_var not in facts['master']: - scheme = 'https' if use_ssl else 'http' - netloc = default - if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'): - netloc = "%s:%s" % (netloc, port) - facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', '')) - - - # Query current OpenShift config and return a dictionary containing - # settings that may be valuable for determining actions that need to be - # taken in the playbooks/roles - def current_config(self, facts): - current_config=dict() - roles = [ 
role for role in facts if role not in ['common','provider'] ] - for role in roles: - if 'roles' in current_config: - current_config['roles'].append(role) +def get_provider_metadata(metadata_url, supports_recursive=False, + headers=None, expect_json=False): + """ Retrieve the provider metadata + + Args: + metadata_url (str): metadata url + supports_recursive (bool): does the provider metadata api support + recursion + headers (dict): headers to set for metadata request + expect_json (bool): does the metadata_url return json + Returns: + dict: the provider metadata + """ + try: + if supports_recursive: + metadata = query_metadata(metadata_url, headers, + expect_json) + else: + metadata = walk_metadata(metadata_url, headers, + expect_json) + except OpenShiftFactsMetadataUnavailableError: + metadata = None + return metadata + + +def normalize_gce_facts(metadata, facts): + """ Normalize gce facts + + Args: + metadata (dict): provider metadata + facts (dict): facts to update + Returns: + dict: the result of adding the normalized metadata to the provided + facts dict + """ + for interface in metadata['instance']['networkInterfaces']: + int_info = dict(ips=[interface['ip']], network_type='gce') + int_info['public_ips'] = [ac['externalIp'] for ac + in interface['accessConfigs']] + int_info['public_ips'].extend(interface['forwardedIps']) + _, _, network_id = interface['network'].rpartition('/') + int_info['network_id'] = network_id + facts['network']['interfaces'].append(int_info) + _, _, zone = metadata['instance']['zone'].rpartition('/') + facts['zone'] = zone + facts['external_id'] = metadata['instance']['id'] + + # Default to no sdn for GCE deployments + facts['use_openshift_sdn'] = False + + # GCE currently only supports a single interface + facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0] + pub_ip = facts['network']['interfaces'][0]['public_ips'][0] + facts['network']['public_ip'] = pub_ip + facts['network']['hostname'] = metadata['instance']['hostname'] + + # TODO: attempt to resolve public_hostname + facts['network']['public_hostname'] = facts['network']['public_ip'] + + return facts + + +def normalize_aws_facts(metadata, facts): + """ Normalize aws facts + + Args: + metadata (dict): provider metadata + facts (dict): facts to update + Returns: + dict: the result of adding the normalized metadata to the provided + facts dict + """ + for interface in sorted( + metadata['network']['interfaces']['macs'].values(), + key=lambda x: x['device-number'] + ): + int_info = dict() + var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'} + for ips_var, int_var in var_map.iteritems(): + ips = interface.get(int_var) + if isinstance(ips, basestring): + int_info[ips_var] = [ips] else: - current_config['roles'] = [role] + int_info[ips_var] = ips + if 'vpc-id' in interface: + int_info['network_type'] = 'vpc' + else: + int_info['network_type'] = 'classic' + if int_info['network_type'] == 'vpc': + int_info['network_id'] = interface['subnet-id'] + else: + int_info['network_id'] = None + facts['network']['interfaces'].append(int_info) + facts['zone'] = metadata['placement']['availability-zone'] + facts['external_id'] = metadata['instance-id'] + + # TODO: actually attempt to determine default local and public ips + # by using the ansible default ip fact and the ipv4-associations + # from the ec2 metadata + facts['network']['ip'] = metadata.get('local-ipv4') + facts['network']['public_ip'] = metadata.get('public-ipv4') + + # TODO: verify that local hostname makes sense and is resolvable + 
facts['network']['hostname'] = metadata.get('local-hostname') + + # TODO: verify that public hostname makes sense and is resolvable + facts['network']['public_hostname'] = metadata.get('public-hostname') + + return facts + + +def normalize_openstack_facts(metadata, facts): + """ Normalize openstack facts + + Args: + metadata (dict): provider metadata + facts (dict): facts to update + Returns: + dict: the result of adding the normalized metadata to the provided + facts dict + """ + # openstack ec2 compat api does not support network interfaces and + # the version tested on did not include the info in the openstack + # metadata api, should be updated if neutron exposes this. + + facts['zone'] = metadata['availability_zone'] + facts['external_id'] = metadata['uuid'] + facts['network']['ip'] = metadata['ec2_compat']['local-ipv4'] + facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4'] + + # TODO: verify local hostname makes sense and is resolvable + facts['network']['hostname'] = metadata['hostname'] + + # TODO: verify that public hostname makes sense and is resolvable + pub_h = metadata['ec2_compat']['public-hostname'] + facts['network']['public_hostname'] = pub_h + + return facts + + +def normalize_provider_facts(provider, metadata): + """ Normalize provider facts + + Args: + provider (str): host provider + metadata (dict): provider metadata + Returns: + dict: the normalized provider facts + """ + if provider is None or metadata is None: + return {} + + # TODO: test for ipv6_enabled where possible (gce, aws do not support) + # and configure ipv6 facts if available + + # TODO: add support for setting user_data if available + + facts = dict(name=provider, metadata=metadata, + network=dict(interfaces=[], ipv6_enabled=False)) + if provider == 'gce': + facts = normalize_gce_facts(metadata, facts) + elif provider == 'ec2': + facts = normalize_aws_facts(metadata, facts) + elif provider == 'openstack': + facts = normalize_openstack_facts(metadata, facts) + return facts + + +def set_url_facts_if_unset(facts): + """ Set url facts if not already present in facts dict + + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with the generated url facts if they + were not already present + """ + if 'master' in facts: + for (url_var, use_ssl, port, default) in [ + ('api_url', + facts['master']['api_use_ssl'], + facts['master']['api_port'], + facts['common']['hostname']), + ('public_api_url', + facts['master']['api_use_ssl'], + facts['master']['api_port'], + facts['common']['public_hostname']), + ('console_url', + facts['master']['console_use_ssl'], + facts['master']['console_port'], + facts['common']['hostname']), + ('public_console_url' 'console_use_ssl', + facts['master']['console_use_ssl'], + facts['master']['console_port'], + facts['common']['public_hostname'])]: + if url_var not in facts['master']: + scheme = 'https' if use_ssl else 'http' + netloc = default + if ((scheme == 'https' and port != '443') + or (scheme == 'http' and port != '80')): + netloc = "%s:%s" % (netloc, port) + facts['master'][url_var] = urlparse.urlunparse( + (scheme, netloc, '', '', '', '') + ) + return facts + + +def get_current_config(facts): + """ Get current openshift config + + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with the current openshift config + """ + current_config = dict() + roles = [role for role in facts if role not in ['common', 'provider']] + for role in roles: + if 'roles' in current_config: + current_config['roles'].append(role) 
+ else: + current_config['roles'] = [role] - # TODO: parse the /etc/sysconfig/openshift-{master,node} config to - # determine the location of files. + # TODO: parse the /etc/sysconfig/openshift-{master,node} config to + # determine the location of files. - # Query kubeconfig settings - kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates' - if role == 'node': - kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname']) + # Query kubeconfig settings + kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates' + if role == 'node': + kubeconfig_dir = os.path.join( + kubeconfig_dir, "node-%s" % facts['common']['hostname'] + ) - kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig') - if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path): + kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig') + if (os.path.isfile('/usr/bin/openshift') + and os.path.isfile(kubeconfig_path)): + try: + _, output, _ = module.run_command( + ["/usr/bin/openshift", "ex", "config", "view", "-o", + "json", "--kubeconfig=%s" % kubeconfig_path], + check_rc=False + ) + config = json.loads(output) + + cad = 'certificate-authority-data' + try: + for cluster in config['clusters']: + config['clusters'][cluster][cad] = 'masked' + except KeyError: + pass try: - _, output, error = module.run_command(["/usr/bin/openshift", "ex", - "config", "view", "-o", - "json", - "--kubeconfig=%s" % kubeconfig_path], - check_rc=False) - config = json.loads(output) - - try: - for cluster in config['clusters']: - config['clusters'][cluster]['certificate-authority-data'] = 'masked' - except KeyError: - pass - try: - for user in config['users']: - config['users'][user]['client-certificate-data'] = 'masked' - config['users'][user]['client-key-data'] = 'masked' - except KeyError: - pass - - current_config['kubeconfig'] = config - except Exception: + for user in config['users']: + config['users'][user][cad] = 'masked' + config['users'][user]['client-key-data'] = 'masked' + except KeyError: pass - return current_config + current_config['kubeconfig'] = config + # override pylint broad-except warning, since we do not want + # to bubble up any exceptions if openshift ex config view + # fails + # pylint: disable=broad-except + except Exception: + pass - def apply_provider_facts(self, facts, provider_facts, roles): - if not provider_facts: - return facts + return current_config - use_openshift_sdn = provider_facts.get('use_openshift_sdn') - if isinstance(use_openshift_sdn, bool): - facts['common']['use_openshift_sdn'] = use_openshift_sdn - common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')] - for h_var, ip_var in common_vars: - ip_value = provider_facts['network'].get(ip_var) - if ip_value: - facts['common'][ip_var] = ip_value +def apply_provider_facts(facts, provider_facts, roles): + """ Apply provider facts to supplied facts dict - facts['common'][h_var] = self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var]) + Args: + facts (dict): facts dict to update + provider_facts (dict): provider facts to apply + roles: host roles + Returns: + dict: the merged facts + """ + if not provider_facts: + return facts - if 'node' in roles: - ext_id = provider_facts.get('external_id') - if ext_id: - facts['node']['external_id'] = ext_id + use_openshift_sdn = provider_facts.get('use_openshift_sdn') + if isinstance(use_openshift_sdn, bool): + facts['common']['use_openshift_sdn'] = use_openshift_sdn - facts['provider'] = provider_facts - 
return facts + common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')] + for h_var, ip_var in common_vars: + ip_value = provider_facts['network'].get(ip_var) + if ip_value: + facts['common'][ip_var] = ip_value - def hostname_valid(self, hostname): - if (not hostname or - hostname.startswith('localhost') or - hostname.endswith('localdomain') or - len(hostname.split('.')) < 2): - return False + facts['common'][h_var] = choose_hostname( + [provider_facts['network'].get(h_var)], + facts['common'][ip_var] + ) - return True + if 'node' in roles: + ext_id = provider_facts.get('external_id') + if ext_id: + facts['node']['external_id'] = ext_id + + facts['provider'] = provider_facts + return facts + + +def merge_facts(orig, new): + """ Recursively merge facts dicts + + Args: + orig (dict): existing facts + new (dict): facts to update + Returns: + dict: the merged facts + """ + facts = dict() + for key, value in orig.iteritems(): + if key in new: + if isinstance(value, dict): + facts[key] = merge_facts(value, new[key]) + else: + facts[key] = copy.copy(new[key]) + else: + facts[key] = copy.deepcopy(value) + new_keys = set(new.keys()) - set(orig.keys()) + for key in new_keys: + facts[key] = copy.deepcopy(new[key]) + return facts + + +def save_local_facts(filename, facts): + """ Save local facts + + Args: + filename (str): local facts file + facts (dict): facts to set + """ + try: + fact_dir = os.path.dirname(filename) + if not os.path.exists(fact_dir): + os.makedirs(fact_dir) + with open(filename, 'w') as fact_file: + fact_file.write(module.jsonify(facts)) + except (IOError, OSError) as ex: + raise OpenShiftFactsFileWriteError( + "Could not create fact file: %s, error: %s" % (filename, ex) + ) - def choose_hostname(self, hostnames=[], fallback=''): - hostname = fallback - ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ] - hosts = [ i for i in hostnames if i is not None and i not in set(ips) ] +def get_local_facts_from_file(filename): + """ Retrieve local facts from fact file + + Args: + filename (str): local facts file + Returns: + dict: the retrieved facts + """ + local_facts = dict() + try: + # Handle conversion of INI style facts file to json style + ini_facts = ConfigParser.SafeConfigParser() + ini_facts.read(filename) + for section in ini_facts.sections(): + local_facts[section] = dict() + for key, value in ini_facts.items(section): + local_facts[section][key] = value + + except (ConfigParser.MissingSectionHeaderError, + ConfigParser.ParsingError): + try: + with open(filename, 'r') as facts_file: + local_facts = json.load(facts_file) + except (ValueError, IOError): + pass - for host_list in (hosts, ips): - for h in host_list: - if self.hostname_valid(h): - return h + return local_facts - return hostname + +class OpenShiftFactsUnsupportedRoleError(Exception): + """OpenShift Facts Unsupported Role Error""" + pass + + +class OpenShiftFactsFileWriteError(Exception): + """OpenShift Facts File Write Error""" + pass + + +class OpenShiftFactsMetadataUnavailableError(Exception): + """OpenShift Facts Metadata Unavailable Error""" + pass + + +class OpenShiftFacts(object): + """ OpenShift Facts + + Attributes: + facts (dict): OpenShift facts for the host + + Args: + role (str): role for setting local facts + filename (str): local facts file to use + local_facts (dict): local facts to set + + Raises: + OpenShiftFactsUnsupportedRoleError: + """ + known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns'] + + def 
+class OpenShiftFacts(object):
+    """ OpenShift Facts
+
+    Attributes:
+        facts (dict): OpenShift facts for the host
+
+    Args:
+        role (str): role for setting local facts
+        filename (str): local facts file to use
+        local_facts (dict): local facts to set
+
+    Raises:
+        OpenShiftFactsUnsupportedRoleError:
+    """
+    known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
+
+    def __init__(self, role, filename, local_facts):
+        self.changed = False
+        self.filename = filename
+        if role not in self.known_roles:
+            raise OpenShiftFactsUnsupportedRoleError(
+                "Role %s is not supported by this module" % role
+            )
+        self.role = role
+        self.system_facts = ansible_facts(module)
+        self.facts = self.generate_facts(local_facts)
+
+    def generate_facts(self, local_facts):
+        """ Generate facts
+
+        Args:
+            local_facts (dict): local_facts for overriding generated
+                                defaults
+
+        Returns:
+            dict: The generated facts
+        """
+        local_facts = self.init_local_facts(local_facts)
+        roles = local_facts.keys()
+
+        defaults = self.get_defaults(roles)
+        provider_facts = self.init_provider_facts()
+        facts = apply_provider_facts(defaults, provider_facts, roles)
+        facts = merge_facts(facts, local_facts)
+        facts['current_config'] = get_current_config(facts)
+        facts = set_url_facts_if_unset(facts)
+        return dict(openshift=facts)
 
     def get_defaults(self, roles):
-        ansible_facts = self.get_ansible_facts()
+        """ Get default fact values
 
+        Args:
+            roles (list): list of roles for this host
+
+        Returns:
+            dict: The generated default facts
+        """
         defaults = dict()
 
         common = dict(use_openshift_sdn=True)
-        ip = ansible_facts['default_ipv4']['address']
-        common['ip'] = ip
-        common['public_ip'] = ip
+        ip_addr = self.system_facts['default_ipv4']['address']
+        common['ip'] = ip_addr
+        common['public_ip'] = ip_addr
 
-        rc, output, error = module.run_command(['hostname', '-f'])
-        hostname_f = output.strip() if rc == 0 else ''
-        hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']]
-        hostname = self.choose_hostname(hostname_values)
+        exit_code, output, _ = module.run_command(['hostname', '-f'])
+        hostname_f = output.strip() if exit_code == 0 else ''
+        hostname_values = [hostname_f, self.system_facts['nodename'],
+                           self.system_facts['fqdn']]
+        hostname = choose_hostname(hostname_values)
 
         common['hostname'] = hostname
         common['public_hostname'] = hostname
         defaults['common'] = common
 
         if 'master' in roles:
-            # TODO: provide for a better way to override just the port, or just
-            # the urls, instead of forcing both, also to override the hostname
-            # without having to re-generate these urls later
             master = dict(api_use_ssl=True, api_port='8443',
-                    console_use_ssl=True, console_path='/console',
-                    console_port='8443', etcd_use_ssl=False,
-                    etcd_port='4001', portal_net='172.30.17.0/24')
+                          console_use_ssl=True, console_path='/console',
+                          console_port='8443', etcd_use_ssl=False,
+                          etcd_port='4001', portal_net='172.30.17.0/24')
             defaults['master'] = master
 
         if 'node' in roles:
             node = dict(external_id=common['hostname'], pod_cidr='',
                         labels={}, annotations={})
-            node['resources_cpu'] = ansible_facts['processor_cores']
-            node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+            node['resources_cpu'] = self.system_facts['processor_cores']
+            node['resources_memory'] = int(
+                int(self.system_facts['memtotal_mb']) * 1024 * 1024 * 0.75
+            )
             defaults['node'] = node
 
         return defaults
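# --- editor's sketch, not part of the diff ---------------------------------
# get_defaults above seeds node resources from the gathered system facts:
# all reported cores, and 75% of physical memory. memtotal_mb is reported in
# MiB, so it is converted to bytes before the scaling. The same arithmetic,
# with hypothetical values:
memtotal_mb = 16384            # e.g. a 16 GiB node, as Ansible reports it
processor_cores = 4

resources_cpu = processor_cores
resources_memory = int(int(memtotal_mb) * 1024 * 1024 * 0.75)

print(resources_cpu, resources_memory)   # 4 12884901888 (12 GiB in bytes)
# ----------------------------------------------------------------------------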
 
-    def merge_facts(self, orig, new):
-        facts = dict()
-        for key, value in orig.iteritems():
-            if key in new:
-                if isinstance(value, dict):
-                    facts[key] = self.merge_facts(value, new[key])
-                else:
-                    facts[key] = copy.copy(new[key])
-            else:
-                facts[key] = copy.deepcopy(value)
-        new_keys = set(new.keys()) - set(orig.keys())
-        for key in new_keys:
-            facts[key] = copy.deepcopy(new[key])
-        return facts
-
-    def query_metadata(self, metadata_url, headers=None, expect_json=False):
-        r, info = fetch_url(module, metadata_url, headers=headers)
-        if info['status'] != 200:
-            raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
-        if expect_json:
-            return module.from_json(r.read())
-        else:
-            return [line.strip() for line in r.readlines()]
-
-    def walk_metadata(self, metadata_url, headers=None, expect_json=False):
-        metadata = dict()
-
-        for line in self.query_metadata(metadata_url, headers, expect_json):
-            if line.endswith('/') and not line == 'public-keys/':
-                key = line[:-1]
-                metadata[key]=self.walk_metadata(metadata_url + line, headers,
-                                                 expect_json)
-            else:
-                results = self.query_metadata(metadata_url + line, headers,
-                                              expect_json)
-                if len(results) == 1:
-                    metadata[line] = results.pop()
-                else:
-                    metadata[line] = results
-        return metadata
-
-    def get_provider_metadata(self, metadata_url, supports_recursive=False,
-                              headers=None, expect_json=False):
-        try:
-            if supports_recursive:
-                metadata = self.query_metadata(metadata_url, headers, expect_json)
-            else:
-                metadata = self.walk_metadata(metadata_url, headers, expect_json)
-        except OpenShiftFactsMetadataUnavailableError as e:
-            metadata = None
-        return metadata
-
-    def get_ansible_facts(self):
-        if not hasattr(self, 'ansible_facts'):
-            self.ansible_facts = ansible_facts(module)
-        return self.ansible_facts
-
     def guess_host_provider(self):
+        """ Guess the host provider
+
+        Returns:
+            dict: The generated default facts for the detected provider
+        """
         # TODO: cloud provider facts should probably be submitted upstream
-        ansible_facts = self.get_ansible_facts()
-        product_name = ansible_facts['product_name']
-        product_version = ansible_facts['product_version']
-        virt_type = ansible_facts['virtualization_type']
-        virt_role = ansible_facts['virtualization_role']
+        product_name = self.system_facts['product_name']
+        product_version = self.system_facts['product_version']
+        virt_type = self.system_facts['virtualization_type']
+        virt_role = self.system_facts['virtualization_role']
         provider = None
         metadata = None
 
         # TODO: this is not exposed through module_utils/facts.py in ansible,
         # need to create PR for ansible to expose it
-        bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
+        bios_vendor = get_file_content(
+            '/sys/devices/virtual/dmi/id/bios_vendor'
+        )
         if bios_vendor == 'Google':
             provider = 'gce'
-            metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true'
+            metadata_url = ('http://metadata.google.internal/'
+                            'computeMetadata/v1/?recursive=true')
             headers = {'Metadata-Flavor': 'Google'}
-            metadata = self.get_provider_metadata(metadata_url, True, headers,
-                True)
+            metadata = get_provider_metadata(metadata_url, True, headers,
+                                             True)
 
             # Filter sshKeys and serviceAccounts from gce metadata
             if metadata:
                 metadata['project']['attributes'].pop('sshKeys', None)
                 metadata['instance'].pop('serviceAccounts', None)
-        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
+        elif (virt_type == 'xen' and virt_role == 'guest'
+              and re.match(r'.*\.amazon$', product_version)):
             provider = 'ec2'
             metadata_url = 'http://169.254.169.254/latest/meta-data/'
-            metadata = self.get_provider_metadata(metadata_url)
+            metadata = get_provider_metadata(metadata_url)
         elif re.search(r'OpenStack', product_name):
             provider = 'openstack'
-            metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
-            metadata = self.get_provider_metadata(metadata_url, True, None, True)
+            metadata_url = ('http://169.254.169.254/openstack/latest/'
+                            'meta_data.json')
+            metadata = get_provider_metadata(metadata_url, True, None,
+                                             True)
 
             if metadata:
                 ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
-                metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
-
+                metadata['ec2_compat'] = get_provider_metadata(
+                    ec2_compat_url
+                )
+
+                # disable pylint maybe-no-member because overloaded use of
+                # the module name causes pylint to not detect that results
+                # is an array or hash
+                # pylint: disable=maybe-no-member
                 # Filter public_keys and random_seed from openstack metadata
                 metadata.pop('public_keys', None)
                 metadata.pop('random_seed', None)
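# --- editor's sketch, not part of the diff ---------------------------------
# guess_host_provider above keys off three signals: a Google DMI BIOS
# vendor (GCE), a Xen guest whose product version ends in '.amazon' (EC2),
# and an 'OpenStack' product name. The same decision table, reduced to a
# pure function over hypothetical sample facts:
import re

def guess_provider(bios_vendor, product_name, product_version,
                   virt_type, virt_role):
    if bios_vendor == 'Google':
        return 'gce'
    if (virt_type == 'xen' and virt_role == 'guest'
            and re.match(r'.*\.amazon$', product_version)):
        return 'ec2'
    if re.search(r'OpenStack', product_name):
        return 'openstack'
    return None

print(guess_provider('Xen', 'HVM domU', '4.2.amazon', 'xen', 'guest'))  # ec2
# ----------------------------------------------------------------------------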
@@ -312,146 +668,74 @@ class OpenShiftFacts():
 
         return dict(name=provider, metadata=metadata)
 
-    def normalize_provider_facts(self, provider, metadata):
-        if provider is None or metadata is None:
-            return {}
-
-        # TODO: test for ipv6_enabled where possible (gce, aws do not support)
-        # and configure ipv6 facts if available
-
-        # TODO: add support for setting user_data if available
-
-        facts = dict(name=provider, metadata=metadata)
-        network = dict(interfaces=[], ipv6_enabled=False)
-        if provider == 'gce':
-            for interface in metadata['instance']['networkInterfaces']:
-                int_info = dict(ips=[interface['ip']], network_type=provider)
-                int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ]
-                int_info['public_ips'].extend(interface['forwardedIps'])
-                _, _, network_id = interface['network'].rpartition('/')
-                int_info['network_id'] = network_id
-                network['interfaces'].append(int_info)
-            _, _, zone = metadata['instance']['zone'].rpartition('/')
-            facts['zone'] = zone
-            facts['external_id'] = metadata['instance']['id']
-
-            # Default to no sdn for GCE deployments
-            facts['use_openshift_sdn'] = False
-
-            # GCE currently only supports a single interface
-            network['ip'] = network['interfaces'][0]['ips'][0]
-            network['public_ip'] = network['interfaces'][0]['public_ips'][0]
-            network['hostname'] = metadata['instance']['hostname']
-
-            # TODO: attempt to resolve public_hostname
-            network['public_hostname'] = network['public_ip']
-        elif provider == 'ec2':
-            for interface in sorted(metadata['network']['interfaces']['macs'].values(),
-                                    key=lambda x: x['device-number']):
-                int_info = dict()
-                var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
-                for ips_var, int_var in var_map.iteritems():
-                    ips = interface[int_var]
-                    int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips
-                int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic'
-                int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None
-                network['interfaces'].append(int_info)
-            facts['zone'] = metadata['placement']['availability-zone']
-            facts['external_id'] = metadata['instance-id']
-
-            # TODO: actually attempt to determine default local and public ips
-            # by using the ansible default ip fact and the ipv4-associations
-            # form the ec2 metadata
-            network['ip'] = metadata['local-ipv4']
-            network['public_ip'] = metadata['public-ipv4']
-
-            # TODO: verify that local hostname makes sense and is resolvable
-            network['hostname'] = metadata['local-hostname']
-
-            # TODO: verify that public hostname makes sense and is resolvable
-            network['public_hostname'] = metadata['public-hostname']
-        elif provider == 'openstack':
-            # openstack ec2 compat api does not support network interfaces and
-            # the version tested on did not include the info in the openstack
-            # metadata api, should be updated if neutron exposes this.
-
-            facts['zone'] = metadata['availability_zone']
-            facts['external_id'] = metadata['uuid']
-            network['ip'] = metadata['ec2_compat']['local-ipv4']
-            network['public_ip'] = metadata['ec2_compat']['public-ipv4']
-
-            # TODO: verify local hostname makes sense and is resolvable
-            network['hostname'] = metadata['hostname']
-
-            # TODO: verify that public hostname makes sense and is resolvable
-            network['public_hostname'] = metadata['ec2_compat']['public-hostname']
-
-        facts['network'] = network
-        return facts
-
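# --- editor's sketch, not part of the diff ---------------------------------
# Whatever the provider, the normalize_provider_facts logic above reduces
# raw metadata to one common shape that apply_provider_facts can consume.
# A hypothetical EC2-style result, shown here only for reference:
normalized = {
    'name': 'ec2',
    'zone': 'us-east-1a',
    'external_id': 'i-0123456789abcdef0',
    'network': {
        'interfaces': [{'ips': ['10.0.0.10'],
                        'public_ips': ['203.0.113.10'],
                        'network_type': 'vpc',
                        'network_id': 'subnet-0abc'}],
        'ipv6_enabled': False,
        'ip': '10.0.0.10',
        'public_ip': '203.0.113.10',
        'hostname': 'ip-10-0-0-10.ec2.internal',
        'public_hostname': 'ec2-203-0-113-10.compute-1.amazonaws.com',
    },
    'metadata': {},   # the (filtered) raw provider metadata rides along
}
print(normalized['network']['public_ip'])
# ----------------------------------------------------------------------------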
     def init_provider_facts(self):
+        """ Initialize the provider facts
+
+        Returns:
+            dict: The normalized provider facts
+        """
         provider_info = self.guess_host_provider()
-        provider_facts = self.normalize_provider_facts(
-                provider_info.get('name'),
-                provider_info.get('metadata')
+        provider_facts = normalize_provider_facts(
+            provider_info.get('name'),
+            provider_info.get('metadata')
         )
         return provider_facts
 
-    def get_facts(self):
-        # TODO: transform facts into cleaner format (openshift_<blah> instead
-        # of openshift.<blah>
-        return self.facts
-
-    def init_local_facts(self, facts={}):
-        changed = False
+    def init_local_facts(self, facts=None):
+        """ Initialize the provider facts
 
-        local_facts = ConfigParser.SafeConfigParser()
-        local_facts.read(self.filename)
+        Args:
+            facts (dict): local facts to set
 
-        section = self.role
-        if not local_facts.has_section(section):
-            local_facts.add_section(section)
+        Returns:
+            dict: The result of merging the provided facts with existing
+                  local facts
+        """
+        changed = False
+        facts_to_set = {self.role: dict()}
+        if facts is not None:
+            facts_to_set[self.role] = facts
+
+        local_facts = get_local_facts_from_file(self.filename)
+
+        for arg in ['labels', 'annotations']:
+            if arg in facts_to_set and isinstance(facts_to_set[arg],
+                                                  basestring):
+                facts_to_set[arg] = module.from_json(facts_to_set[arg])
+
+        new_local_facts = merge_facts(local_facts, facts_to_set)
+        for facts in new_local_facts.values():
+            keys_to_delete = []
+            for fact, value in facts.iteritems():
+                if value == "" or value is None:
+                    keys_to_delete.append(fact)
+            for key in keys_to_delete:
+                del facts[key]
+
+        if new_local_facts != local_facts:
             changed = True
 
-        for key, value in facts.iteritems():
-            if isinstance(value, bool):
-                value = str(value)
-            if not value:
-                continue
-            if not local_facts.has_option(section, key) or local_facts.get(section, key) != value:
-                local_facts.set(section, key, value)
-                changed = True
+            if not module.check_mode:
+                save_local_facts(self.filename, new_local_facts)
 
-        if changed and not module.check_mode:
-            try:
-                fact_dir = os.path.dirname(self.filename)
-                if not os.path.exists(fact_dir):
-                    os.makedirs(fact_dir)
-                with open(self.filename, 'w') as fact_file:
-                    local_facts.write(fact_file)
-            except (IOError, OSError) as e:
-                raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e))
         self.changed = changed
-
-        role_facts = dict()
-        for section in local_facts.sections():
-            role_facts[section] = dict()
-            for opt, val in local_facts.items(section):
-                role_facts[section][opt] = val
-        return role_facts
+        return new_local_facts
 
 
 def main():
+    """ main """
+    # disabling pylint errors for global-variable-undefined and invalid-name
+    # for 'global module' usage, since it is required to use ansible_facts
+    # pylint: disable=global-variable-undefined, invalid-name
     global module
     module = AnsibleModule(
-        argument_spec = dict(
-            role=dict(default='common',
-                      choices=OpenShiftFacts.known_roles,
-                      required=False),
-            local_facts=dict(default={}, type='dict', required=False),
-        ),
-        supports_check_mode=True,
-        add_file_common_args=True,
+        argument_spec=dict(
+            role=dict(default='common', required=False,
+                      choices=OpenShiftFacts.known_roles),
+            local_facts=dict(default=None, type='dict', required=False),
+        ),
+        supports_check_mode=True,
+        add_file_common_args=True,
     )
 
     role = module.params['role']
@@ -464,11 +748,13 @@ def main():
         file_params['path'] = fact_file
         file_args = module.load_file_common_arguments(file_params)
         changed = module.set_fs_attributes_if_different(file_args,
-                openshift_facts.changed)
+                                                        openshift_facts.changed)
 
     return module.exit_json(changed=changed,
-            ansible_facts=openshift_facts.get_facts())
+                            ansible_facts=openshift_facts.facts)
 
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
 # import module snippets
 from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
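# --- editor's sketch, not part of the diff ---------------------------------
# The rewritten init_local_facts merges the incoming facts into whatever is
# already on disk, prunes empty values (so passing '' or None unsets a
# fact), and only writes when the merged result actually differs. The
# prune-and-compare core, stated standalone with hypothetical data:
def prune_and_compare(local_facts, new_local_facts):
    for facts in new_local_facts.values():
        for key in [k for k, v in facts.items() if v == "" or v is None]:
            del facts[key]
    return new_local_facts != local_facts   # True means a write is needed

on_disk = {'node': {'pod_cidr': '10.1.0.0/24'}}
merged = {'node': {'pod_cidr': '10.1.0.0/24', 'labels': None}}
print(prune_and_compare(on_disk, merged))   # False: pruning removed the diff
# ----------------------------------------------------------------------------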
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 5a7d10d25..d71e6d019 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,3 +1,9 @@
 ---
+- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0
+  assert:
+    that:
+    - ansible_version | version_compare('1.8.0', 'ge')
+    - ansible_version | version_compare('1.9.0', 'ne')
+
 - name: Gather OpenShift facts
   openshift_facts:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 87fb347a8..56cf43531 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -2,12 +2,15 @@
 openshift_node_ips: []
 
 # TODO: update setting these values based on the facts
-# TODO: update for console port change
 os_firewall_allow:
 - service: etcd embedded
   port: 4001/tcp
 - service: OpenShift api https
   port: 8443/tcp
+- service: OpenShift dns tcp
+  port: 53/tcp
+- service: OpenShift dns udp
+  port: 53/udp
 os_firewall_deny:
 - service: OpenShift api http
   port: 8080/tcp
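# --- editor's sketch, not part of the diff ---------------------------------
# The assert added to roles/openshift_facts/tasks/main.yml gates the role on
# Ansible >= 1.8.0 while refusing 1.9.0 specifically. The same predicate in
# plain Python, using tuple comparison over hypothetical version strings
# (version_compare in the task handles the real parsing):
def ansible_version_ok(version):
    parts = tuple(int(p) for p in version.split('.')[:3])
    return parts >= (1, 8, 0) and parts != (1, 9, 0)

for v in ('1.7.2', '1.8.4', '1.9.0', '1.9.1'):
    print(v, ansible_version_ok(v))   # False, True, False, True
# ----------------------------------------------------------------------------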