author     Samuel Munilla <smunilla@redhat.com>    2016-03-07 13:47:05 -0500
committer  Samuel Munilla <smunilla@redhat.com>    2016-07-18 10:23:34 -0400
commit     4a11a05a59348dc2875a017c67b0ed88808dc06c (patch)
tree       05ce52de090dc2ab91d5fa9d1a917df0d9eb9e92 /utils
parent     166db870ed38eaff45a040128c9f4aca252f25d0 (diff)
Arbitrary Installer YAML

Initial build of the new, more flexible installer config file format.
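The new format nests the cluster definition under a top-level `deployment` key: hosts move from a flat top-level `hosts` list into `deployment.hosts`, the per-host boolean flags (`master: true`, `node: true`, and so on) become a `roles` list, and roles get their own section where variables can later be attached. As orientation before the diff, here is a condensed before/after of the layout, abridged from the SAMPLE_CONFIG fixtures this patch rewrites (the host values are the fixtures' sample data, trimmed to one host):

# Old (v1) layout: hosts at the top level, one boolean flag per role.
OLD_SAMPLE_CONFIG = """
variant: openshift-enterprise
ansible_ssh_user: root
hosts:
  - connect_to: 10.0.0.1
    ip: 10.0.0.1
    hostname: master-private.example.com
    master: true
    node: true
"""

# New (v2) layout: a 'deployment' section owns hosts, roles, and
# deployment-wide variables; each host names its roles.
NEW_SAMPLE_CONFIG = """
variant: openshift-enterprise
variant_version: 3.2
ansible_ssh_user: root
deployment:
    hosts:
      - connect_to: 10.0.0.1
        ip: 10.0.0.1
        hostname: master-private.example.com
        roles:
            - master
            - node
    roles:
        master:
        node:
"""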
Diffstat (limited to 'utils')

 -rw-r--r--  utils/src/ooinstall/cli_installer.py       141
 -rw-r--r--  utils/src/ooinstall/oo_config.py           196
 -rw-r--r--  utils/src/ooinstall/openshift_ansible.py    86
 -rw-r--r--  utils/test/cli_installer_tests.py          453
 -rw-r--r--  utils/test/fixture.py                       55
 -rw-r--r--  utils/test/oo_config_tests.py              195

6 files changed, 666 insertions(+), 460 deletions(-)
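Most of the churn in cli_installer.py and openshift_ansible.py below is mechanical: every direct attribute test such as `host.master` or `host.node` becomes a role-membership helper on `Host`. A minimal, self-contained sketch of that pattern, simplified from the oo_config.py hunks in this patch (the two sample hosts are illustrative, not part of the commit):

class Host(object):
    """Simplified from the Host class in utils/src/ooinstall/oo_config.py below."""
    def __init__(self, **kwargs):
        self.connect_to = kwargs.get('connect_to', None)
        # allowable roles: master, node, etcd, storage, master_lb, new
        self.roles = kwargs.get('roles', [])

    def is_master(self):
        return 'master' in self.roles

    def is_node(self):
        return 'node' in self.roles

# A colocated master/node plus a dedicated node, as the CLI now builds them:
hosts = [Host(connect_to='10.0.0.1', roles=['master', 'node']),
         Host(connect_to='10.0.0.2', roles=['node'])]
masters = [h for h in hosts if h.is_master()]                        # 10.0.0.1
dedicated = [h for h in hosts if h.is_node() and not h.is_master()]  # 10.0.0.2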
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index d6f2540bc..d052ad852 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -10,7 +10,7 @@ import click
 from ooinstall import openshift_ansible
 from ooinstall import OOConfig
 from ooinstall.oo_config import OOConfigInvalidHostError
-from ooinstall.oo_config import Host
+from ooinstall.oo_config import Host, Role
 from ooinstall.variants import find_variant, get_variant_version_combos

 DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
@@ -121,21 +121,23 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
     click.echo(message)

     hosts = []
+    roles = set(['master', 'node', 'storage'])
     more_hosts = True
     num_masters = 0
     while more_hosts:
         host_props = {}
+        host_props['roles'] = []
         host_props['connect_to'] = click.prompt('Enter hostname or IP address',
                                                 value_proc=validate_prompt_hostname)

         if not masters_set:
             if click.confirm('Will this host be an OpenShift Master?'):
-                host_props['master'] = True
+                host_props['roles'].append('master')
                 num_masters += 1

                 if oo_cfg.settings['variant_version'] == '3.0':
                     masters_set = True
-        host_props['node'] = True
+        host_props['roles'].append('node')

         host_props['containerized'] = False
         if oo_cfg.settings['variant_version'] != '3.0':
@@ -152,6 +154,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen

         hosts.append(host)

+
         if print_summary:
             print_installation_summary(hosts, oo_cfg.settings['variant_version'])
@@ -163,11 +166,12 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen

     if num_masters >= 3:
         collect_master_lb(hosts)
+        roles.add('master_lb')

     if not existing_env:
         collect_storage_host(hosts)

-    return hosts
+    return hosts, roles


 def print_installation_summary(hosts, version=None):
@@ -184,9 +188,9 @@ def print_installation_summary(hosts, version=None):
     for host in hosts:
         print_host_summary(hosts, host)

-    masters = [host for host in hosts if host.master]
-    nodes = [host for host in hosts if host.node]
-    dedicated_nodes = [host for host in hosts if host.node and not host.master]
+    masters = [host for host in hosts if host.is_master()]
+    nodes = [host for host in hosts if host.is_node()]
+    dedicated_nodes = [host for host in hosts if host.is_node() and not host.is_master()]
     click.echo('')
     click.echo('Total OpenShift Masters: %s' % len(masters))
     click.echo('Total OpenShift Nodes: %s' % len(nodes))
@@ -226,26 +230,26 @@ deployment."""

 def print_host_summary(all_hosts, host):
     click.echo("- %s" % host.connect_to)
-    if host.master:
+    if host.is_master():
         click.echo("  - OpenShift Master")
-    if host.node:
+    if host.is_node():
         if host.is_dedicated_node():
             click.echo("  - OpenShift Node (Dedicated)")
         elif host.is_schedulable_node(all_hosts):
             click.echo("  - OpenShift Node")
         else:
             click.echo("  - OpenShift Node (Unscheduled)")
-    if host.master_lb:
+    if host.is_master_lb():
         if host.preconfigured:
             click.echo("  - Load Balancer (Preconfigured)")
         else:
             click.echo("  - Load Balancer (HAProxy)")
-    if host.master:
+    if host.is_master():
         if host.is_etcd_member(all_hosts):
             click.echo("  - Etcd Member")
         else:
             click.echo("  - Etcd (Embedded)")
-    if host.storage:
+    if host.is_storage():
         click.echo("  - Storage")
@@ -279,7 +283,7 @@ hostname.
         # Make sure this host wasn't already specified:
         for host in hosts:
-            if host.connect_to == hostname and (host.master or host.node):
+            if host.connect_to == hostname and (host.is_master() or host.is_node()):
                 raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
                                          'please specify a separate host' % hostname)
         return hostname
@@ -289,9 +293,7 @@ hostname.
     install_haproxy = \
         click.confirm('Should the reference haproxy load balancer be installed on this host?')
     host_props['preconfigured'] = not install_haproxy
-    host_props['master'] = False
-    host_props['node'] = False
-    host_props['master_lb'] = True
+    host_props['roles'] = ['master_lb']
     master_lb = Host(**host_props)
     hosts.append(master_lb)
@@ -309,14 +311,14 @@ Note: Containerized storage hosts are not currently supported.
     click.echo(message)

     host_props = {}
-    first_master = next(host for host in hosts if host.master)
+    first_master = next(host for host in hosts if host.is_master())
     hostname_or_ip = click.prompt('Enter hostname or IP address',
                                   value_proc=validate_prompt_hostname,
                                   default=first_master.connect_to)
     existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
-    if existing and existing_host.node:
-        existing_host.storage = True
+    if existing and existing_host.is_node():
+        existing_host.roles.append('storage')
     else:
         host_props['connect_to'] = hostname_or_ip
         host_props['preconfigured'] = False
@@ -331,14 +333,14 @@ def is_host_already_node_or_master(hostname, hosts):
     existing_host = None

     for host in hosts:
-        if host.connect_to == hostname and (host.master or host.node):
+        if host.connect_to == hostname and (host.is_master() or host.is_node()):
             is_existing = True
             existing_host = host

     return is_existing, existing_host

 def confirm_hosts_facts(oo_cfg, callback_facts):
-    hosts = oo_cfg.hosts
+    hosts = oo_cfg.deployment.hosts
     click.clear()
     message = """
 A list of the facts gathered from the provided hosts follows. Because it is
@@ -414,19 +416,20 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended

 def check_hosts_config(oo_cfg, unattended):
     click.clear()
-    masters = [host for host in oo_cfg.hosts if host.master]
+    masters = [host for host in oo_cfg.deployment.hosts if host.is_master()]

     if len(masters) == 2:
         click.echo("A minimum of 3 Masters are required for HA deployments.")
         sys.exit(1)

     if len(masters) > 1:
-        master_lb = [host for host in oo_cfg.hosts if host.master_lb]
+        master_lb = [host for host in oo_cfg.deployment.hosts if host.is_master_lb()]
+
         if len(master_lb) > 1:
             click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
             sys.exit(1)
         elif len(master_lb) == 1:
-            if master_lb[0].master or master_lb[0].node:
+            if master_lb[0].is_master() or master_lb[0].is_node():
                 click.echo('ERROR: The Master load balancer is configured as a master or node. ' \
                            'Please correct this.')
                 sys.exit(1)
@@ -440,7 +443,8 @@ https://docs.openshift.org/latest/install_config/install/advanced_install.html#m
             click.echo(message)
             sys.exit(1)

-    dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
+    dedicated_nodes = [host for host in oo_cfg.deployment.hosts \
+                       if host.is_node() and not host.is_master()]
     if len(dedicated_nodes) == 0:
         message = """
 WARNING: No dedicated Nodes specified.
 By default, colocated Masters have
@@ -481,7 +485,7 @@ def confirm_continue(message):

 def error_if_missing_info(oo_cfg):
     missing_info = False
-    if not oo_cfg.hosts:
+    if not oo_cfg.deployment.hosts:
         missing_info = True
         click.echo('For unattended installs, hosts must be specified on the '
                    'command line or in the config file: %s' % oo_cfg.config_path)
@@ -515,9 +519,24 @@ def error_if_missing_info(oo_cfg):
         for host in missing_facts:
             click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))

+    # check that all listed host roles are included
+    listed_roles = get_host_roles_set(oo_cfg)
+    configured_roles = set([role for role in oo_cfg.deployment.roles])
+    if listed_roles != configured_roles:
+        missing_info = True
+        click.echo('Any roles assigned to hosts must be defined.')
+
     if missing_info:
         sys.exit(1)

+def get_host_roles_set(oo_cfg):
+    roles_set = set()
+    for host in oo_cfg.deployment.hosts:
+        for role in host.roles:
+            roles_set.add(role)
+
+    return roles_set
+
 def get_proxy_hostnames_and_excludes():
     message = """
 If a proxy is needed to reach HTTP and HTTPS traffic please enter the name below.
@@ -574,35 +593,51 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
         confirm_continue(message)
         click.clear()

-    if oo_cfg.settings.get('ansible_ssh_user', '') == '':
+    if not oo_cfg.settings.get('ansible_ssh_user', ''):
         oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
         click.clear()

-    if oo_cfg.settings.get('variant', '') == '':
+    if not oo_cfg.settings.get('variant', ''):
         variant, version = get_variant_and_version()
         oo_cfg.settings['variant'] = variant.name
         oo_cfg.settings['variant_version'] = version.name
         click.clear()

-    if not oo_cfg.hosts:
-        oo_cfg.hosts = collect_hosts(oo_cfg)
+    if not oo_cfg.deployment.hosts:
+        oo_cfg.deployment.hosts, roles = collect_hosts(oo_cfg)
+
+        for role in roles:
+            oo_cfg.deployment.roles[role] = Role(name=role, variables={})
         click.clear()

-    if not oo_cfg.settings.get('master_routingconfig_subdomain', None):
-        oo_cfg.settings['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain()
+    if not 'master_routingconfig_subdomain' in oo_cfg.deployment.variables:
+        oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \
+            get_master_routingconfig_subdomain()
         click.clear()

     if not oo_cfg.settings.get('openshift_http_proxy', None) and \
        LooseVersion(oo_cfg.settings.get('variant_version', '0.0')) >= LooseVersion('3.2'):
         http_proxy, https_proxy, proxy_excludes = get_proxy_hostnames_and_excludes()
-        oo_cfg.settings['openshift_http_proxy'] = http_proxy
-        oo_cfg.settings['openshift_https_proxy'] = https_proxy
-        oo_cfg.settings['openshift_no_proxy'] = proxy_excludes
+        oo_cfg.deployment.variables['proxy_http'] = http_proxy
+        oo_cfg.deployment.variables['proxy_https'] = https_proxy
+        oo_cfg.deployment.variables['proxy_exclude_hosts'] = proxy_excludes
         click.clear()

     return oo_cfg

+def get_role_variable(oo_cfg, role_name, variable_name):
+    try:
+        target_role = next(role for role in oo_cfg.deployment.roles if role.name is role_name)
+        target_variable = target_role.variables[variable_name]
+        return target_variable
+    except (StopIteration, KeyError):
+        return None
+
+def set_role_variable(oo_cfg, role_name, variable_name, variable_value):
+    target_role = next(role for role in oo_cfg.deployment.roles if role.name is role_name)
+    target_role[variable_name] = variable_value
+
 def collect_new_nodes(oo_cfg):
     click.clear()
     click.echo('*** New Node Configuration ***')
@@ -610,14 +645,15 @@ def collect_new_nodes(oo_cfg):
 Add new nodes here
     """
     click.echo(message)
-    return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
+    new_nodes, _ = collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
+    return new_nodes

 def get_installed_hosts(hosts, callback_facts):
     installed_hosts = []

     # count nativeha lb as an installed host
     try:
-        first_master = next(host for host in hosts if host.master)
+        first_master = next(host for host in hosts if host.is_master())
         lb_hostname = callback_facts[first_master.connect_to]['master'].get('cluster_hostname', '')
         lb_host = \
             next(host for host in hosts if host.ip == callback_facts[lb_hostname]['common']['ip'])
@@ -636,17 +672,17 @@ def is_installed_host(host, callback_facts):
                     callback_facts[host.connect_to]['common'].get('version', '') and \
                     callback_facts[host.connect_to]['common'].get('version', '') != 'None'

-    return version_found or host.master_lb or host.preconfigured
+    return version_found

 # pylint: disable=too-many-branches
 # This pylint error will be corrected shortly in separate PR.
 def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):

     # Copy the list of existing hosts so we can remove any already installed nodes.
-    hosts_to_run_on = list(oo_cfg.hosts)
+    hosts_to_run_on = list(oo_cfg.deployment.hosts)

     # Check if master or nodes already have something installed
-    installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
+    installed_hosts = get_installed_hosts(oo_cfg.deployment.hosts, callback_facts)
     if len(installed_hosts) > 0:
         click.echo('Installed environment detected.')
         # This check has to happen before we start removing hosts later in this method
@@ -668,11 +704,11 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
         # present a message listing already installed hosts and remove hosts if needed
         for host in installed_hosts:
-            if host.master:
+            if host.is_master():
                 click.echo("{} is already an OpenShift Master".format(host))
                 # Masters stay in the list, we need to run against them when adding
                 # new nodes.
-            elif host.node:
+            elif host.is_node():
                 click.echo("{} is already an OpenShift Node".format(host))
                 # force is only used for reinstalls so we don't want to remove
                 # anything.
@@ -699,11 +735,11 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
                 new_nodes = collect_new_nodes(oo_cfg)

                 hosts_to_run_on.extend(new_nodes)
-                oo_cfg.hosts.extend(new_nodes)
+                oo_cfg.deployment.hosts.extend(new_nodes)

                 openshift_ansible.set_config(oo_cfg)
                 click.echo('Gathering information from hosts...')
-                callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
+                callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
                 if error or callback_facts is None:
                     click.echo("There was a problem fetching the required information. See " \
                                "{} for details.".format(oo_cfg.settings['ansible_log_path']))
@@ -800,14 +836,14 @@ def uninstall(ctx):
     oo_cfg = ctx.obj['oo_cfg']
     verbose = ctx.obj['verbose']

-    if len(oo_cfg.hosts) == 0:
+    if len(oo_cfg.deployment.hosts) == 0:
         click.echo("No hosts defined in: %s" % oo_cfg.config_path)
         sys.exit(1)

     click.echo("OpenShift will be uninstalled from the following hosts:\n")
     if not ctx.obj['unattended']:
         # Prompt interactively to confirm:
-        for host in oo_cfg.hosts:
+        for host in oo_cfg.deployment.hosts:
             click.echo("  * %s" % host.connect_to)
         proceed = click.confirm("\nDo you wish to proceed?")
         if not proceed:
@@ -841,7 +877,7 @@ def upgrade(ctx, latest_minor, next_major):
         },
     }

-    if len(oo_cfg.hosts) == 0:
+    if len(oo_cfg.deployment.hosts) == 0:
         click.echo("No hosts defined in: %s" % oo_cfg.config_path)
         sys.exit(1)
@@ -901,7 +937,7 @@ def upgrade(ctx, latest_minor, next_major):
     click.echo("Openshift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % (
         variant, old_version, oo_cfg.settings['variant'], new_version))
-    for host in oo_cfg.hosts:
+    for host in oo_cfg.deployment.hosts:
         click.echo("  * %s" % host.connect_to)

     if not ctx.obj['unattended']:
@@ -936,10 +972,11 @@ def install(ctx, force, gen_inventory):

     check_hosts_config(oo_cfg, ctx.obj['unattended'])

-    print_installation_summary(oo_cfg.hosts, oo_cfg.settings.get('variant_version', None))
+    print_installation_summary(oo_cfg.deployment.hosts, oo_cfg.settings.get('variant_version', None))
     click.echo('Gathering information from hosts...')
-    callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
+    callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
                                                             verbose)
+
     if error or callback_facts is None:
         click.echo("There was a problem fetching the required information. " \
             "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
@@ -959,6 +996,7 @@ def install(ctx, force, gen_inventory):

     # Write quick installer config file to disk:
     oo_cfg.save_to_disk()
+
     # Write ansible inventory file to disk:
     inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on)
@@ -977,8 +1015,9 @@ If changes are needed please edit the config file above and re-run.
     if not ctx.obj['unattended']:
         confirm_continue(message)

-    error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.hosts,
+    error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts,
                                                 hosts_to_run_on, verbose)
+
     if error:
         # The bootstrap script will print out the log location.
         message = """
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 24dfbe013..cfea44b62 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -1,26 +1,32 @@
-# TODO: Temporarily disabled due to importing old code into openshift-ansible
-# repo. We will work on these over time.
 # pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods

 import os
+import sys
 import yaml

 from pkg_resources import resource_filename

-PERSIST_SETTINGS = [
+CONFIG_PERSIST_SETTINGS = [
     'ansible_ssh_user',
+    'ansible_callback_facts_yaml',
     'ansible_config',
+    'ansible_inventory_path',
     'ansible_log_path',
-    'master_routingconfig_subdomain',
-    'proxy',
-    'proxy_exclude_hosts',
+    'deployment',
+    'version',
     'variant',
     'variant_version',
-    'version',
 ]
+
+DEPLOYMENT_PERSIST_SETTINGS = [
+    'master_routingconfig_subdomain',
+    'proxy_http',
+    'proxy_https',
+    'proxy_exclude_hosts',
+]
+
 DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
 PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']

-
 class OOConfigFileError(Exception):
     """The provided config file path can't be read/written
     """
@@ -40,31 +46,19 @@ class Host(object):
         self.public_ip = kwargs.get('public_ip', None)
         self.public_hostname = kwargs.get('public_hostname', None)
         self.connect_to = kwargs.get('connect_to', None)
+        self.preconfigured = kwargs.get('preconfigured', None)
+        self.schedulable = kwargs.get('schedulable', None)
         self.new_host = kwargs.get('new_host', None)
-
-        # Should this host run as an OpenShift master:
-        self.master = kwargs.get('master', False)
-
-        # Should this host run as an OpenShift node:
-        self.node = kwargs.get('node', False)
-
-        # Should this host run as an HAProxy:
-        self.master_lb = kwargs.get('master_lb', False)
-
-        # Should this host run as an HAProxy:
-        self.storage = kwargs.get('storage', False)
-
         self.containerized = kwargs.get('containerized', False)
+        self.node_labels = kwargs.get('node_labels', '')

-        if self.connect_to is None:
-            raise OOConfigInvalidHostError("You must specify either an ip " \
-                "or hostname as 'connect_to'")
+        # allowable roles: master, node, etcd, storage, master_lb, new
+        self.roles = kwargs.get('roles', [])

-        if self.master is False and self.node is False and \
-           self.master_lb is False and self.storage is False:
+        if self.connect_to is None:
             raise OOConfigInvalidHostError(
-                "You must specify each host as either a master or a node.")
+                "You must specify either an ip or hostname as 'connect_to'")

     def __str__(self):
         return self.connect_to
@@ -75,41 +69,83 @@ class Host(object):
     def to_dict(self):
         """ Used when exporting to yaml. """
         d = {}
-        for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
-                     'master', 'node', 'master_lb', 'storage', 'containerized',
-                     'connect_to', 'preconfigured', 'new_host']:
+
+        for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', 'connect_to',
+                     'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels']:
             # If the property is defined (not None or False), export it:
             if getattr(self, prop):
                 d[prop] = getattr(self, prop)
         return d

+    def is_master(self):
+        return 'master' in self.roles
+
+    def is_node(self):
+        return 'node' in self.roles
+
+    def is_master_lb(self):
+        return 'master_lb' in self.roles
+
+    def is_storage(self):
+        return 'storage' in self.roles
+
+
     def is_etcd_member(self, all_hosts):
         """ Will this host be a member of a standalone etcd cluster. """
-        if not self.master:
+        if not self.is_master():
             return False
-        masters = [host for host in all_hosts if host.master]
+        masters = [host for host in all_hosts if host.is_master()]
         if len(masters) > 1:
             return True
         return False

     def is_dedicated_node(self):
         """ Will this host be a dedicated node. (not a master) """
-        return self.node and not self.master
+        return self.is_node() and not self.is_master()

     def is_schedulable_node(self, all_hosts):
         """ Will this host be a node marked as schedulable. """
-        if not self.node:
+        if not self.is_node():
             return False
-        if not self.master:
+        if not self.is_master():
             return True
-        masters = [host for host in all_hosts if host.master]
-        nodes = [host for host in all_hosts if host.node]
+        masters = [host for host in all_hosts if host.is_master()]
+        nodes = [host for host in all_hosts if host.is_node()]
         if len(masters) == len(nodes):
             return True
         return False

+class Role(object):
+    """ A role that will be applied to a host. """
+    def __init__(self, name, variables):
+        self.name = name
+        self.variables = variables
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return self.name
+
+    def to_dict(self):
+        """ Used when exporting to yaml. """
+        d = {}
+        for prop in ['name', 'variables']:
+            # If the property is defined (not None or False), export it:
+            if getattr(self, prop):
+                d[prop] = getattr(self, prop)
+        return d
+
+
+class Deployment(object):
+    def __init__(self, **kwargs):
+        self.hosts = kwargs.get('hosts', [])
+        self.roles = kwargs.get('roles', {})
+        self.variables = kwargs.get('variables', {})
+
+
 class OOConfig(object):
     default_dir = os.path.normpath(
         os.environ.get('XDG_CONFIG_HOME',
@@ -122,32 +158,51 @@ class OOConfig(object):
         else:
             self.config_path = os.path.normpath(self.default_dir + self.default_file)

+        self.deployment = Deployment(hosts=[], roles={}, variables={})
         self.settings = {}
         self._read_config()
         self._set_defaults()

+
     def _read_config(self):
-        self.hosts = []
         try:
             if os.path.exists(self.config_path):
-                cfgfile = open(self.config_path, 'r')
-                self.settings = yaml.safe_load(cfgfile.read())
-                cfgfile.close()
+                with open(self.config_path, 'r') as cfgfile:
+                    loaded_config = yaml.safe_load(cfgfile.read())

                 # Use the presence of a Description as an indicator this is
                 # a legacy config file:
                 if 'Description' in self.settings:
                     self._upgrade_legacy_config()

+                try:
+                    host_list = loaded_config['deployment']['hosts']
+                    role_list = loaded_config['deployment']['roles']
+                except KeyError as e:
+                    print "Error loading config, no such key: {}".format(e)
+                    sys.exit(0)
+
+                for setting in CONFIG_PERSIST_SETTINGS:
+                    try:
+                        self.settings[setting] = str(loaded_config[setting])
+                    except KeyError:
+                        continue
+
+                for setting in DEPLOYMENT_PERSIST_SETTINGS:
+                    try:
+                        self.deployment.variables[setting] = \
+                            str(loaded_config['deployment'][setting])
+                    except KeyError:
+                        continue
+
                 # Parse the hosts into DTO objects:
-                if 'hosts' in self.settings:
-                    for host in self.settings['hosts']:
-                        self.hosts.append(Host(**host))
+                for host in host_list:
+                    self.deployment.hosts.append(Host(**host))
+
+                # Parse the roles into Objects
+                for name, variables in role_list.iteritems():
+                    self.deployment.roles.update({name: Role(name, variables)})

-                # Watchout for the variant_version coming in as a float:
-                if 'variant_version' in self.settings:
-                    self.settings['variant_version'] = \
-                        str(self.settings['variant_version'])
         except IOError, ferr:
             raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
@@ -179,18 +234,21 @@ class OOConfig(object):
             self.settings['variant'] = 'openshift-enterprise'
             self.settings['variant_version'] = '3.0'

+    def _upgrade_v1_config(self):
+        #TODO write code to upgrade old config
+        return
+
     def _set_defaults(self):

         if 'ansible_inventory_directory' not in self.settings:
-            self.settings['ansible_inventory_directory'] = \
-                self._default_ansible_inv_dir()
+            self.settings['ansible_inventory_directory'] = self._default_ansible_inv_dir()
         if not os.path.exists(self.settings['ansible_inventory_directory']):
             os.makedirs(self.settings['ansible_inventory_directory'])
         if 'ansible_plugins_directory' not in self.settings:
             self.settings['ansible_plugins_directory'] = \
                 resource_filename(__name__, 'ansible_plugins')
         if 'version' not in self.settings:
-            self.settings['version'] = 'v1'
+            self.settings['version'] = 'v2'

         if 'ansible_callback_facts_yaml' not in self.settings:
             self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
@@ -219,7 +277,7 @@ class OOConfig(object):
         """
         result = {}

-        for host in self.hosts:
+        for host in self.deployment.hosts:
             missing_facts = []
             if host.preconfigured:
                 required_facts = PRECONFIGURED_REQUIRED_FACTS
@@ -240,17 +298,33 @@ class OOConfig(object):

     def persist_settings(self):
         p_settings = {}
-        for setting in PERSIST_SETTINGS:
+
+        for setting in CONFIG_PERSIST_SETTINGS:
             if setting in self.settings and self.settings[setting]:
                 p_settings[setting] = self.settings[setting]
-        p_settings['hosts'] = []
-        for host in self.hosts:
-            p_settings['hosts'].append(host.to_dict())

-        if self.settings['ansible_inventory_directory'] != \
-                self._default_ansible_inv_dir():
-            p_settings['ansible_inventory_directory'] = \
-                self.settings['ansible_inventory_directory']
+
+        p_settings['deployment'] = {}
+        p_settings['deployment']['hosts'] = []
+        p_settings['deployment']['roles'] = {}
+
+        for setting in DEPLOYMENT_PERSIST_SETTINGS:
+            if setting in self.deployment.variables:
+                p_settings['deployment'][setting] = self.deployment.variables[setting]
+
+        for host in self.deployment.hosts:
+            p_settings['deployment']['hosts'].append(host.to_dict())
+
+        try:
+            p_settings['variant'] = self.settings['variant']
+            p_settings['variant_version'] = self.settings['variant_version']
+
+            if self.settings['ansible_inventory_directory'] != self._default_ansible_inv_dir():
+                p_settings['ansible_inventory_directory'] = self.settings['ansible_inventory_directory']
+        except KeyError as e:
+            print "Error persisting settings: {}".format(e)
+            sys.exit(0)
+
         return p_settings
@@ -261,7 +335,7 @@ class OOConfig(object):
         return self.yaml()

     def get_host(self, name):
-        for host in self.hosts:
+        for host in self.deployment.hosts:
             if host.connect_to == name:
                 return host
         return None
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 97aee0b53..8f7cf07e7 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -11,15 +11,32 @@ from ooinstall.variants import find_variant

 CFG = None

+ROLES_TO_GROUPS_MAP = {
+    'master': 'masters',
+    'node': 'nodes',
+    'storage': 'nfs'
+}
+
+VARIABLES_MAP = {
+    'ansible_ssh_user': 'ansible_ssh_user',
+    'ansible_config': 'ansible_config',
+    'ansible_log_path': 'ansible_log_path',
+    'deployment_type': 'deployment_type',
+    'master_routingconfig_subdomain':'openshift_master_default_subdomain',
+    'proxy_http':'openshift_http_proxy',
+    'proxy_https': 'openshift_https_proxy',
+    'proxy_exclude_hosts': 'openshift_no_proxy',
+}
+
 def set_config(cfg):
     global CFG
     CFG = cfg

 def generate_inventory(hosts):
     global CFG
-    masters = [host for host in hosts if host.master]
-    nodes = [host for host in hosts if host.node]
-    new_nodes = [host for host in hosts if host.node and host.new_host]
+    masters = [host for host in hosts if host.is_master()]
+    nodes = [host for host in hosts if host.is_node()]
+    new_nodes = [host for host in hosts if host.is_node() and host.new_host]
     proxy = determine_proxy_configuration(hosts)
     storage = determine_storage_configuration(hosts)
     multiple_masters = len(masters) > 1
@@ -28,7 +45,7 @@ def generate_inventory(hosts):
     base_inventory_path = CFG.settings['ansible_inventory_path']
     base_inventory = open(base_inventory_path, 'w')

-    write_inventory_children(base_inventory, multiple_masters, proxy, storage, scaleup)
+    write_inventory_children(base_inventory, multiple_masters, proxy, scaleup)

     write_inventory_vars(base_inventory, multiple_masters, proxy)
@@ -66,7 +83,7 @@ def generate_inventory(hosts):
             schedulable = None

             # If the node is also a master, we must explicitly set schedulablity:
-            if node.master:
+            if node.is_master():
                 schedulable = node.is_schedulable_node(hosts)
             write_host(node, base_inventory, schedulable)
@@ -88,7 +105,7 @@ def generate_inventory(hosts):
     return base_inventory_path

 def determine_proxy_configuration(hosts):
-    proxy = next((host for host in hosts if host.master_lb), None)
+    proxy = next((host for host in hosts if host.is_master_lb()), None)
     if proxy:
         if proxy.hostname == None:
             proxy.hostname = proxy.connect_to
@@ -97,53 +114,80 @@ def determine_proxy_configuration(hosts):
     return proxy

 def determine_storage_configuration(hosts):
-    storage = next((host for host in hosts if host.storage), None)
+    storage = next((host for host in hosts if host.is_storage()), None)

     return storage

-def write_inventory_children(base_inventory, multiple_masters, proxy, storage, scaleup):
+def write_inventory_children(base_inventory, multiple_masters, proxy, scaleup):
     global CFG

     base_inventory.write('\n[OSEv3:children]\n')
-    base_inventory.write('masters\n')
-    base_inventory.write('nodes\n')
+
+    for role in CFG.deployment.roles:
+        child = ROLES_TO_GROUPS_MAP.get(role, role)
+        base_inventory.write('{}\n'.format(child))
+
     if scaleup:
         base_inventory.write('new_nodes\n')
     if multiple_masters:
         base_inventory.write('etcd\n')
     if not getattr(proxy, 'preconfigured', True):
         base_inventory.write('lb\n')
-    if storage:
-        base_inventory.write('nfs\n')

 def write_inventory_vars(base_inventory, multiple_masters, proxy):
     global CFG
     base_inventory.write('\n[OSEv3:vars]\n')
-    base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
+
+    for variable, value in CFG.settings.iteritems():
+        inventory_var = VARIABLES_MAP.get(variable, None)
+        if inventory_var and value:
+            base_inventory.write('{}={}\n'.format(inventory_var, value))
+
+    for variable, value in CFG.deployment.variables.iteritems():
+        inventory_var = VARIABLES_MAP.get(variable, variable)
+        if value:
+            base_inventory.write('{}={}\n'.format(inventory_var, value))
+
     if CFG.settings['ansible_ssh_user'] != 'root':
         base_inventory.write('ansible_become=yes\n')
+
     if multiple_masters and proxy is not None:
         base_inventory.write('openshift_master_cluster_method=native\n')
         base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
         base_inventory.write(
             "openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
-    if CFG.settings.get('master_routingconfig_subdomain', False):
-        base_inventory.write(
-            "openshift_master_default_subdomain={}\n".format(
-                CFG.settings['master_routingconfig_subdomain']))
+
     if CFG.settings.get('variant_version', None) == '3.1':
         #base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
         base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6'))

-    if CFG.settings.get('openshift_http_proxy', ''):
+    write_proxy_settings(base_inventory)
+
+    for name, role_obj in CFG.deployment.roles.iteritems():
+        if role_obj.variables:
+            base_inventory.write("{}:vars".format(name))
+            for variable, value in role_obj.variables.iteritems():
+                inventory_var = VARIABLES_MAP.get(variable, variable)
+                if value:
+                    base_inventory.write('{}={}\n'.format(inventory_var, value))
+            base_inventory.write("\n")
+
+
+def write_proxy_settings(base_inventory):
+    try:
         base_inventory.write("openshift_http_proxy={}\n".format(
             CFG.settings['openshift_http_proxy']))
-    if CFG.settings.get('openshift_https_proxy', ''):
+    except KeyError:
+        pass
+    try:
         base_inventory.write("openshift_https_proxy={}\n".format(
             CFG.settings['openshift_https_proxy']))
-    if CFG.settings.get('openshift_no_proxy', ''):
+    except KeyError:
+        pass
+    try:
         base_inventory.write("openshift_no_proxy={}\n".format(
             CFG.settings['openshift_no_proxy']))
+    except KeyError:
+        pass


 def write_host(host, inventory, schedulable=None):
@@ -160,8 +204,6 @@ def write_host(host, inventory, schedulable=None):
         facts += ' openshift_public_hostname={}'.format(host.public_hostname)
     if host.containerized:
         facts += ' containerized={}'.format(host.containerized)
-    # TODO: For not write_host is handles both master and nodes.
-    # Technically only nodes will ever need this.

     # Distinguish between three states, no schedulability specified (use default),
     # explicitly set to True, or explicitly set to False:
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 66ed66660..13973f22f 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -76,6 +76,14 @@ MOCK_FACTS_QUICKHA = {
         'common': {
             'ip': '10.0.0.4',
             'public_ip': '10.0.0.4',
+            'hostname': 'node3-private.example.com',
+            'public_hostname': 'node3.example.com'
+        }
+    },
+    '10.0.0.5': {
+        'common': {
+            'ip': '10.0.0.5',
+            'public_ip': '10.0.0.5',
             'hostname': 'proxy-private.example.com',
             'public_hostname': 'proxy.example.com'
         }
@@ -94,221 +102,279 @@ MOCK_FACTS_QUICKHA = {

 BAD_CONFIG = """
 variant: %s
 ansible_ssh_user: root
-hosts:
-  - connect_to: 10.0.0.1
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-    node: true
-  - ip: 10.0.0.2
-    hostname: node1-private.example.com
-    public_ip: 24.222.0.2
-    public_hostname: node1.example.com
-    node: true
-  - connect_to: 10.0.0.3
-    ip: 10.0.0.3
-    hostname: node2-private.example.com
-    public_ip: 24.222.0.3
-    public_hostname: node2.example.com
-    node: true
+deployment:
+    hosts:
+      - connect_to: 10.0.0.1
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+            - node
+      - ip: 10.0.0.2
+        hostname: node1-private.example.com
+        public_ip: 24.222.0.2
+        public_hostname: node1.example.com
+        roles:
+            - node
+      - connect_to: 10.0.0.3
+        ip: 10.0.0.3
+        hostname: node2-private.example.com
+        public_ip: 24.222.0.3
+        public_hostname: node2.example.com
+        roles:
+            - node
+    roles:
+        master:
+        node:
 """

 QUICKHA_CONFIG = """
 variant: %s
 ansible_ssh_user: root
-master_routingconfig_subdomain: example.com
-hosts:
-  - connect_to: 10.0.0.1
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.2
-    ip: 10.0.0.2
-    hostname: node1-private.example.com
-    public_ip: 24.222.0.2
-    public_hostname: node1.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.3
-    ip: 10.0.0.3
-    hostname: node2-private.example.com
-    public_ip: 24.222.0.3
-    public_hostname: node2.example.com
-    node: true
-    master: true
-  - connect_to: 10.0.0.4
-    ip: 10.0.0.4
-    hostname: node3-private.example.com
-    public_ip: 24.222.0.4
-    public_hostname: node3.example.com
-    node: true
-  - connect_to: 10.0.0.5
-    ip: 10.0.0.5
-    hostname: proxy-private.example.com
-    public_ip: 24.222.0.5
-    public_hostname: proxy.example.com
-    master_lb: true
-  - connect_to: 10.1.0.1
-    ip: 10.1.0.1
-    hostname: storage-private.example.com
-    public_ip: 24.222.0.6
-    public_hostname: storage.example.com
-    storage: true
+deployment:
+    hosts:
+      - connect_to: 10.0.0.1
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.2
+        ip: 10.0.0.2
+        hostname: node1-private.example.com
+        public_ip: 24.222.0.2
+        public_hostname: node1.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.3
+        ip: 10.0.0.3
+        hostname: node2-private.example.com
+        public_ip: 24.222.0.3
+        public_hostname: node2.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.4
+        ip: 10.0.0.4
+        hostname: node3-private.example.com
+        public_ip: 24.222.0.4
+        public_hostname: node3.example.com
+        roles:
+            - node
+      - connect_to: 10.0.0.5
+        ip: 10.0.0.5
+        hostname: proxy-private.example.com
+        public_ip: 24.222.0.5
+        public_hostname: proxy.example.com
+        roles:
+            - master_lb
+      - connect_to: 10.1.0.1
+        ip: 10.1.0.1
+        hostname: storage-private.example.com
+        public_ip: 24.222.0.6
+        public_hostname: storage.example.com
+        roles:
+            - storage
+    roles:
+        master:
+        master_lb:
+        node:
+        storage:
 """

 QUICKHA_2_MASTER_CONFIG = """
 variant: %s
 ansible_ssh_user: root
-hosts:
-  - connect_to: 10.0.0.1
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.2
-    ip: 10.0.0.2
-    hostname: node1-private.example.com
-    public_ip: 24.222.0.2
-    public_hostname: node1.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.4
-    ip: 10.0.0.4
-    hostname: node3-private.example.com
-    public_ip: 24.222.0.4
-    public_hostname: node3.example.com
-    node: true
-  - connect_to: 10.0.0.5
-    ip: 10.0.0.5
-    hostname: proxy-private.example.com
-    public_ip: 24.222.0.5
-    public_hostname: proxy.example.com
-    master_lb: true
-  - connect_to: 10.1.0.1
-    ip: 10.1.0.1
-    hostname: storage-private.example.com
-    public_ip: 24.222.0.6
-    public_hostname: storage.example.com
-    storage: true
+deployment:
+    hosts:
+      - connect_to: 10.0.0.1
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.2
+        ip: 10.0.0.2
+        hostname: node1-private.example.com
+        public_ip: 24.222.0.2
+        public_hostname: node1.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.4
+        ip: 10.0.0.4
+        hostname: node3-private.example.com
+        public_ip: 24.222.0.4
+        public_hostname: node3.example.com
+        roles:
+            - node
+      - connect_to: 10.0.0.5
+        ip: 10.0.0.5
+        hostname: proxy-private.example.com
+        public_ip: 24.222.0.5
+        public_hostname: proxy.example.com
+        roles:
+            - master_lb
+      - connect_to: 10.1.0.1
+        ip: 10.1.0.1
+        hostname: storage-private.example.com
+        public_ip: 24.222.0.6
+        public_hostname: storage.example.com
+        roles:
+            - storage
+    roles:
+        master:
+        master_lb:
+        node:
+        storage:
 """

 QUICKHA_CONFIG_REUSED_LB = """
 variant: %s
 ansible_ssh_user: root
-hosts:
-  - connect_to: 10.0.0.1
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.2
-    ip: 10.0.0.2
-    hostname: node1-private.example.com
-    public_ip: 24.222.0.2
-    public_hostname: node1.example.com
-    master: true
-    node: true
-    master_lb: true
-  - connect_to: 10.0.0.3
-    ip: 10.0.0.3
-    hostname: node2-private.example.com
-    public_ip: 24.222.0.3
-    public_hostname: node2.example.com
-    node: true
-    master: true
-  - connect_to: 10.1.0.1
-    ip: 10.1.0.1
-    hostname: storage-private.example.com
-    public_ip: 24.222.0.6
-    public_hostname: storage.example.com
-    storage: true
+deployment:
+    hosts:
+      - connect_to: 10.0.0.1
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.2
+        ip: 10.0.0.2
+        hostname: node1-private.example.com
+        public_ip: 24.222.0.2
+        public_hostname: node1.example.com
+        roles:
+            - master
+            - node
+            - master_lb
+      - connect_to: 10.0.0.3
+        ip: 10.0.0.3
+        hostname: node2-private.example.com
+        public_ip: 24.222.0.3
+        public_hostname: node2.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.1.0.1
+        ip: 10.1.0.1
+        hostname: storage-private.example.com
+        public_ip: 24.222.0.6
+        public_hostname: storage.example.com
+        roles:
+            - storage
+    roles:
+        master:
+        node:
+        storage:
 """

 QUICKHA_CONFIG_NO_LB = """
 variant: %s
 ansible_ssh_user: root
-hosts:
-  - connect_to: 10.0.0.1
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.2
-    ip: 10.0.0.2
-    hostname: node1-private.example.com
-    public_ip: 24.222.0.2
-    public_hostname: node1.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.3
-    ip: 10.0.0.3
-    hostname: node2-private.example.com
-    public_ip: 24.222.0.3
-    public_hostname: node2.example.com
-    node: true
-    master: true
-  - connect_to: 10.1.0.1
-    ip: 10.1.0.1
-    hostname: storage-private.example.com
-    public_ip: 24.222.0.6
-    public_hostname: storage.example.com
-    storage: true
+deployment:
+    hosts:
+      - connect_to: 10.0.0.1
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.2
+        ip: 10.0.0.2
+        hostname: node1-private.example.com
+        public_ip: 24.222.0.2
+        public_hostname: node1.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.3
+        ip: 10.0.0.3
+        hostname: node2-private.example.com
+        public_ip: 24.222.0.3
+        public_hostname: node2.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.1.0.1
+        ip: 10.1.0.1
+        hostname: storage-private.example.com
+        public_ip: 24.222.0.6
+        public_hostname: storage.example.com
+        roles:
+            - storage
+    roles:
+        master:
+        node:
+        storage:
 """

 QUICKHA_CONFIG_PRECONFIGURED_LB = """
 variant: %s
 ansible_ssh_user: root
-master_routingconfig_subdomain: example.com
-hosts:
-  - connect_to: 10.0.0.1
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.2
-    ip: 10.0.0.2
-    hostname: node1-private.example.com
-    public_ip: 24.222.0.2
-    public_hostname: node1.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.3
-    ip: 10.0.0.3
-    hostname: node2-private.example.com
-    public_ip: 24.222.0.3
-    public_hostname: node2.example.com
-    node: true
-    master: true
-  - connect_to: 10.0.0.4
-    ip: 10.0.0.4
-    hostname: node3-private.example.com
-    public_ip: 24.222.0.4
-    public_hostname: node3.example.com
-    node: true
-  - connect_to: proxy-private.example.com
-    hostname: proxy-private.example.com
-    public_hostname: proxy.example.com
-    master_lb: true
-    preconfigured: true
-  - connect_to: 10.1.0.1
-    ip: 10.1.0.1
-    hostname: storage-private.example.com
-    public_ip: 24.222.0.6
-    public_hostname: storage.example.com
-    storage: true
+deployment:
+    hosts:
+      - connect_to: 10.0.0.1
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.2
+        ip: 10.0.0.2
+        hostname: node1-private.example.com
+        public_ip: 24.222.0.2
+        public_hostname: node1.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.3
+        ip: 10.0.0.3
+        hostname: node2-private.example.com
+        public_ip: 24.222.0.3
+        public_hostname: node2.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.4
+        ip: 10.0.0.4
+        hostname: node3-private.example.com
+        public_ip: 24.222.0.4
+        public_hostname: node3.example.com
+        roles:
+            - node
+      - connect_to: proxy-private.example.com
+        hostname: proxy-private.example.com
+        public_hostname: proxy.example.com
+        preconfigured: true
+        roles:
+            - master_lb
+      - connect_to: 10.1.0.1
+        ip: 10.1.0.1
+        hostname: storage-private.example.com
+        public_ip: 24.222.0.6
+        public_hostname: storage.example.com
+        roles:
+            - storage
+    roles:
+        master:
+        master_lb:
+        node:
+        storage:
 """

 class UnattendedCliTests(OOCliFixture):
@@ -439,10 +505,7 @@ class UnattendedCliTests(OOCliFixture):
     @patch('ooinstall.openshift_ansible.run_main_playbook')
     @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_inventory_write(self, load_facts_mock, run_playbook_mock):
-
-        # Add an ssh user so we can verify it makes it to the inventory file:
-        merged_config = "%s\n%s" % (SAMPLE_CONFIG % 'openshift-enterprise',
-                                    "ansible_ssh_user: bob")
+        merged_config = SAMPLE_CONFIG % 'openshift-enterprise'

         load_facts_mock.return_value = (MOCK_FACTS, 0)
         run_playbook_mock.return_value = 0
@@ -456,7 +519,7 @@ class UnattendedCliTests(OOCliFixture):
         # Check the inventory file looks as we would expect:
         inventory = ConfigParser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
-        self.assertEquals('bob',
+        self.assertEquals('root',
                           inventory.get('OSEv3:vars', 'ansible_ssh_user'))
         self.assertEquals('openshift-enterprise',
                           inventory.get('OSEv3:vars', 'deployment_type'))
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index e01eaebaf..006df739b 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -10,28 +10,36 @@ from click.testing import CliRunner

 # Substitute in a product name before use:
 SAMPLE_CONFIG = """
 variant: %s
+variant_version: 3.2
 ansible_ssh_user: root
 master_routingconfig_subdomain: example.com
-hosts:
-  - connect_to: 10.0.0.1
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-    node: true
-  - connect_to: 10.0.0.2
-    ip: 10.0.0.2
-    hostname: node1-private.example.com
-    public_ip: 24.222.0.2
-    public_hostname: node1.example.com
-    node: true
-  - connect_to: 10.0.0.3
-    ip: 10.0.0.3
-    hostname: node2-private.example.com
-    public_ip: 24.222.0.3
-    public_hostname: node2.example.com
-    node: true
+deployment:
+    hosts:
+      - connect_to: 10.0.0.1
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+            - node
+      - connect_to: 10.0.0.2
+        ip: 10.0.0.2
+        hostname: node1-private.example.com
+        public_ip: 24.222.0.2
+        public_hostname: node1.example.com
+        roles:
+            - node
+      - connect_to: 10.0.0.3
+        ip: 10.0.0.3
+        hostname: node2-private.example.com
+        public_ip: 24.222.0.3
+        public_hostname: node2.example.com
+        roles:
+            - node
+    roles:
+        master:
+        node:
 """

 def read_yaml(config_file_path):
@@ -87,12 +95,13 @@ class OOCliFixture(OOInstallFixture):
         self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))

     def _verify_config_hosts(self, written_config, host_count):
-        self.assertEquals(host_count, len(written_config['hosts']))
-        for host in written_config['hosts']:
+        self.assertEquals(host_count, len(written_config['deployment']['hosts']))
+        for host in written_config['deployment']['hosts']:
             self.assertTrue('hostname' in host)
             self.assertTrue('public_hostname' in host)
             if 'preconfigured' not in host:
-                self.assertTrue('node' in host or 'storage' in host)
+                if 'roles' in host:
+                    self.assertTrue('node' in host['roles'] or 'storage' in host['roles'])
                 self.assertTrue('ip' in host)
                 self.assertTrue('public_ip' in host)
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 9f5f8e244..c19fe9e0d 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -12,27 +12,35 @@ from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError

 SAMPLE_CONFIG = """
 variant: openshift-enterprise
+variant_version: 3.2
 ansible_ssh_user: root
-hosts:
-  - connect_to: master-private.example.com
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-    node: true
-  - connect_to: node1-private.example.com
-    ip: 10.0.0.2
-    hostname: node1-private.example.com
-    public_ip: 24.222.0.2
-    public_hostname: node1.example.com
-    node: true
-  - connect_to: node2-private.example.com
-    ip: 10.0.0.3
-    hostname: node2-private.example.com
-    public_ip: 24.222.0.3
-    public_hostname: node2.example.com
-    node: true
+deployment:
+    hosts:
+      - connect_to: master-private.example.com
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+            - node
+      - connect_to: node1-private.example.com
+        ip: 10.0.0.2
+        hostname: node1-private.example.com
+        public_ip: 24.222.0.2
+        public_hostname: node1.example.com
+        roles:
+            - node
+      - connect_to: node2-private.example.com
+        ip: 10.0.0.3
+        hostname: node2-private.example.com
+        public_ip: 24.222.0.3
+        public_hostname: node2.example.com
+        roles:
+            - node
+    roles:
+        master:
+        node:
 """

 # Used to test automatic upgrading of config:
@@ -56,45 +64,59 @@ validated_facts:

 CONFIG_INCOMPLETE_FACTS = """
-hosts:
-  - connect_to: 10.0.0.1
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-  - connect_to: 10.0.0.2
-    ip: 10.0.0.2
-    hostname: 24.222.0.2
-    public_ip: 24.222.0.2
-    node: true
-  - connect_to: 10.0.0.3
-    ip: 10.0.0.3
-    node: true
+deployment:
+    hosts:
+      - connect_to: 10.0.0.1
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+      - connect_to: 10.0.0.2
+        ip: 10.0.0.2
+        hostname: 24.222.0.2
+        public_ip: 24.222.0.2
+        roles:
+            - node
+      - connect_to: 10.0.0.3
+        ip: 10.0.0.3
+        roles:
+            - node
+    roles:
+        master:
+        node:
 """

 CONFIG_BAD = """
 variant: openshift-enterprise
 ansible_ssh_user: root
-hosts:
-  - connect_to: master-private.example.com
-    ip: 10.0.0.1
-    hostname: master-private.example.com
-    public_ip: 24.222.0.1
-    public_hostname: master.example.com
-    master: true
-    node: true
-  - ip: 10.0.0.2
-    hostname: node1-private.example.com
-    public_ip: 24.222.0.2
-    public_hostname: node1.example.com
-    node: true
-  - connect_to: node2-private.example.com
-    ip: 10.0.0.3
-    hostname: node2-private.example.com
-    public_ip: 24.222.0.3
-    public_hostname: node2.example.com
-    node: true
+deployment:
+    hosts:
+      - connect_to: master-private.example.com
+        ip: 10.0.0.1
+        hostname: master-private.example.com
+        public_ip: 24.222.0.1
+        public_hostname: master.example.com
+        roles:
+            - master
+            - node
+      - ip: 10.0.0.2
+        hostname: node1-private.example.com
+        public_ip: 24.222.0.2
+        public_hostname: node1.example.com
+        roles:
+            - node
+      - connect_to: node2-private.example.com
+        ip: 10.0.0.3
+        hostname: node2-private.example.com
+        public_ip: 24.222.0.3
+        public_hostname: node2.example.com
+        roles:
+            - node
+    roles:
+        master:
+        node:
 """

 class OOInstallFixture(unittest.TestCase):
@@ -123,47 +145,6 @@ class OOInstallFixture(unittest.TestCase):
         return path


-class LegacyOOConfigTests(OOInstallFixture):
-
-    def setUp(self):
-        OOInstallFixture.setUp(self)
-        self.cfg_path = self.write_config(os.path.join(self.work_dir,
-            'ooinstall.conf'), LEGACY_CONFIG)
-        self.cfg = OOConfig(self.cfg_path)
-
-    def test_load_config_memory(self):
-        self.assertEquals('openshift-enterprise', self.cfg.settings['variant'])
-        self.assertEquals('3.0', self.cfg.settings['variant_version'])
-        self.assertEquals('v1', self.cfg.settings['version'])
-
-        self.assertEquals(3, len(self.cfg.hosts))
-        h1 = self.cfg.get_host('10.0.0.1')
-        self.assertEquals('10.0.0.1', h1.ip)
-        self.assertEquals('24.222.0.1', h1.public_ip)
-        self.assertEquals('master-private.example.com', h1.hostname)
-        self.assertEquals('master.example.com', h1.public_hostname)
-
-        h2 = self.cfg.get_host('10.0.0.2')
-        self.assertEquals('10.0.0.2', h2.ip)
-        self.assertEquals('24.222.0.2', h2.public_ip)
-        self.assertEquals('node1-private.example.com', h2.hostname)
-        self.assertEquals('node1.example.com', h2.public_hostname)
-
-        h3 = self.cfg.get_host('10.0.0.3')
-        self.assertEquals('10.0.0.3', h3.ip)
-        self.assertEquals('24.222.0.3', h3.public_ip)
-        self.assertEquals('node2-private.example.com', h3.hostname)
-        self.assertEquals('node2.example.com', h3.public_hostname)
-
-        self.assertFalse('masters' in self.cfg.settings)
-        self.assertFalse('nodes' in self.cfg.settings)
-        self.assertFalse('Description' in self.cfg.settings)
-        self.assertFalse('Name' in self.cfg.settings)
-        self.assertFalse('Subscription' in self.cfg.settings)
-        self.assertFalse('Vendor' in self.cfg.settings)
-        self.assertFalse('Version' in self.cfg.settings)
-        self.assertFalse('validates_facts' in self.cfg.settings)
-

 class OOConfigTests(OOInstallFixture):

@@ -173,16 +154,16 @@ class OOConfigTests(OOInstallFixture):
             'ooinstall.conf'), SAMPLE_CONFIG)
         ooconfig = OOConfig(cfg_path)

-        self.assertEquals(3, len(ooconfig.hosts))
-        self.assertEquals("master-private.example.com", ooconfig.hosts[0].connect_to)
-        self.assertEquals("10.0.0.1", ooconfig.hosts[0].ip)
-        self.assertEquals("master-private.example.com", ooconfig.hosts[0].hostname)
+        self.assertEquals(3, len(ooconfig.deployment.hosts))
+        self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].connect_to)
+        self.assertEquals("10.0.0.1", ooconfig.deployment.hosts[0].ip)
+        self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].hostname)

         self.assertEquals(["10.0.0.1", "10.0.0.2", "10.0.0.3"],
-                          [host['ip'] for host in ooconfig.settings['hosts']])
+                          [host.ip for host in ooconfig.deployment.hosts])

         self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
-        self.assertEquals('v1', ooconfig.settings['version'])
+        self.assertEquals('v2', ooconfig.settings['version'])

     def test_load_bad_config(self):
@@ -222,8 +203,10 @@ class OOConfigTests(OOInstallFixture):
         written_config = yaml.safe_load(f.read())
         f.close()

-        self.assertEquals(3, len(written_config['hosts']))
-        for h in written_config['hosts']:
+
+
+        self.assertEquals(3, len(written_config['deployment']['hosts']))
+        for h in written_config['deployment']['hosts']:
             self.assertTrue('ip' in h)
             self.assertTrue('public_ip' in h)
             self.assertTrue('hostname' in h)
@@ -231,7 +214,7 @@ class OOConfigTests(OOInstallFixture):

         self.assertTrue('ansible_ssh_user' in written_config)
         self.assertTrue('variant' in written_config)
-        self.assertEquals('v1', written_config['version'])
+        self.assertEquals('v2', written_config['version'])

         # Some advanced settings should not get written out if they
         # were not specified by the user:
@@ -256,7 +239,3 @@ class HostTests(OOInstallFixture):
             'public_hostname': 'a.example.com',
         }
         self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
-
-
-
-
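A closing note on the inventory side of the change: group names in the generated Ansible inventory are now derived from whatever roles the deployment defines, via ROLES_TO_GROUPS_MAP, instead of hard-coded writes of 'masters' and 'nodes'. A standalone sketch of that lookup, simplified from write_inventory_children() in the openshift_ansible.py hunk above (the sample role list is illustrative):

# Simplified from utils/src/ooinstall/openshift_ansible.py above.
ROLES_TO_GROUPS_MAP = {
    'master': 'masters',
    'node': 'nodes',
    'storage': 'nfs'
}

def inventory_children(roles):
    # Roles without an explicit mapping (e.g. 'master_lb') pass through unchanged.
    return [ROLES_TO_GROUPS_MAP.get(role, role) for role in roles]

print(inventory_children(['master', 'node', 'storage', 'master_lb']))
# -> ['masters', 'nodes', 'nfs', 'master_lb']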