author     Thomas Wiest <twiest@users.noreply.github.com>    2015-12-09 15:49:48 -0500
committer  Thomas Wiest <twiest@users.noreply.github.com>    2015-12-09 15:49:48 -0500
commit     eeb164fae0e6721100c4fcc1717d92bb85b9652c (patch)
tree       70eee046db8012061c178ab4e686650048265564 /utils
parent     898290cb3aabbc9d98883181877ac857a2fe1faf (diff)
parent     14c69ad397be8ee101ef5b4edfa223d703e67ad0 (diff)
Merge pull request #1048 from twiest/prod
Sync master -> Prod
Diffstat (limited to 'utils')
-rwxr-xr-x  utils/site_assets/oo-install-bootstrap.sh |   9
-rw-r--r--  utils/src/ooinstall/cli_installer.py       | 282
-rw-r--r--  utils/src/ooinstall/oo_config.py           |  49
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py   | 105
-rw-r--r--  utils/src/ooinstall/variants.py            |   6
-rw-r--r--  utils/test/cli_installer_tests.py          | 622
-rw-r--r--  utils/test/fixture.py                      | 221
-rw-r--r--  utils/test/oo_config_tests.py              |  34
8 files changed, 1092 insertions(+), 236 deletions(-)
diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh
index e1b2cec90..3847c029a 100755
--- a/utils/site_assets/oo-install-bootstrap.sh
+++ b/utils/site_assets/oo-install-bootstrap.sh
@@ -9,6 +9,13 @@ cmdlnargs="$@"
 : ${OO_INSTALL_LOG:=${TMPDIR}/INSTALLPKGNAME.log}
 [[ $TMPDIR != */ ]] && TMPDIR="${TMPDIR}/"
 
+if rpm -q dnf;
+then
+  PKG_MGR="dnf"
+else
+  PKG_MGR="yum"
+fi
+
 if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
 then
   clear
@@ -18,7 +25,7 @@ if [ -e /etc/redhat-release ]
 then
   for i in python python-virtualenv openssh-clients gcc
   do
-    rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"yum install ${i}\"."; exit 1; }
+    rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"${PKG_MGR} install ${i}\"."; exit 1; }
   done
 fi
 for i in python virtualenv ssh gcc
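The hunk above makes the bootstrap script prefer dnf over yum by probing `rpm -q dnf`. As a rough illustration of the same probe in Python (a hypothetical helper, not part of this patch):

    import subprocess

    def detect_pkg_mgr():
        # Mirrors the script's `rpm -q dnf` probe: exit status 0 means the
        # dnf package is installed, so prefer it; otherwise fall back to yum.
        # (Assumes an RPM-based system where the `rpm` binary exists.)
        with open('/dev/null', 'w') as devnull:
            rc = subprocess.call(['rpm', '-q', 'dnf'],
                                 stdout=devnull, stderr=devnull)
        return 'dnf' if rc == 0 else 'yum'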
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index d9d3dd80c..dc88cb1ad 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -8,6 +8,7 @@ import re
 import sys
 
 from ooinstall import openshift_ansible
 from ooinstall import OOConfig
+from ooinstall.oo_config import OOConfigInvalidHostError
 from ooinstall.oo_config import Host
 from ooinstall.variants import find_variant, get_variant_version_combos
 
@@ -71,7 +72,7 @@ def delete_hosts(hosts):
             click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
     return hosts, None
 
-def collect_hosts(master_set=False):
+def collect_hosts(version=None, masters_set=False, print_summary=True):
     """
         Collect host information from user. This will later be filled in using
         ansible.
@@ -79,19 +80,31 @@
     Returns: a list of host information collected from the user
     """
     click.clear()
-    click.echo('***Host Configuration***')
+    click.echo('*** Host Configuration ***')
     message = """
-The OpenShift Master serves the API and web console. It also coordinates the
-jobs that have to run across the environment. It can even run the datastore.
-For wizard based installations the database will be embedded. It's possible to
-change this later using etcd from Red Hat Enterprise Linux 7.
+You must now specify the hosts that will compose your OpenShift cluster.
+
+Please enter an IP or hostname to connect to for each system in the cluster.
+You will then be prompted to identify what role you would like this system to
+serve in the cluster.
+
+OpenShift Masters serve the API and web console and coordinate the jobs to run
+across the environment. If desired you can specify multiple Master systems for
+an HA deployment, in which case you will be prompted to identify a *separate*
+system to act as the load balancer for your cluster after all Masters and Nodes
+are defined.
+
+If only one Master is specified, an etcd instance embedded within the OpenShift
+Master service will be used as the datastore. This can be later replaced with a
+separate etcd instance if desired. If multiple Masters are specified, a
+separate etcd cluster will be configured with each Master serving as a member.
 
 Any Masters configured as part of this installation process will also be
 configured as Nodes. This is so that the Master will be able to proxy to Pods
-from the API. By default this Node will be unscheduleable but this can be changed
+from the API. By default this Node will be unschedulable but this can be changed
 after installation with 'oadm manage-node'.
 
-The OpenShift Node provides the runtime environments for containers. It will
+OpenShift Nodes provide the runtime environments for containers. They will
 host the required services to be managed by the Master.
 
 http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
@@ -101,17 +114,19 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
 
     hosts = []
     more_hosts = True
+    num_masters = 0
 
     while more_hosts:
         host_props = {}
-        hostname_or_ip = click.prompt('Enter hostname or IP address:',
-                                      default='',
-                                      value_proc=validate_prompt_hostname)
-
-        host_props['connect_to'] = hostname_or_ip
-        if not master_set:
-            is_master = click.confirm('Will this host be an OpenShift Master?')
-            host_props['master'] = is_master
-            master_set = True
+        host_props['connect_to'] = click.prompt('Enter hostname or IP address',
+                                                value_proc=validate_prompt_hostname)
+
+        if not masters_set:
+            if click.confirm('Will this host be an OpenShift Master?'):
+                host_props['master'] = True
+                num_masters += 1
+
+                if version == '3.0':
+                    masters_set = True
         host_props['node'] = True
 
         #TODO: Reenable this option once container installs are out of tech preview
@@ -128,9 +143,142 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
 
         hosts.append(host)
 
-        more_hosts = click.confirm('Do you want to add additional hosts?')
+        if print_summary:
+            print_installation_summary(hosts)
+
+        # If we have one master, this is enough for an all-in-one deployment,
+        # thus we can start asking if you wish to proceed. Otherwise we assume
+        # you must.
+        if masters_set or num_masters != 2:
+            more_hosts = click.confirm('Do you want to add additional hosts?')
+
+    if num_masters >= 3:
+        collect_master_lb(hosts)
+
     return hosts
 
+def print_installation_summary(hosts):
+    """
+    Displays a summary of all hosts configured thus far, and what role each
+    will play.
+
+    Shows total nodes/masters, hints for performing/modifying the deployment
+    with additional setup, warnings for invalid or sub-optimal configurations.
+    """
+    click.clear()
+    click.echo('*** Installation Summary ***\n')
+    click.echo('Hosts:')
+    for host in hosts:
+        print_host_summary(hosts, host)
+
+    masters = [host for host in hosts if host.master]
+    nodes = [host for host in hosts if host.node]
+    dedicated_nodes = [host for host in hosts if host.node and not host.master]
+    click.echo('')
+    click.echo('Total OpenShift Masters: %s' % len(masters))
+    click.echo('Total OpenShift Nodes: %s' % len(nodes))
+
+    if len(masters) == 1:
+        ha_hint_message = """
+NOTE: Add a total of 3 or more Masters to perform an HA installation."""
+        click.echo(ha_hint_message)
+    elif len(masters) == 2:
+        min_masters_message = """
+WARNING: A minimum of 3 masters are required to perform an HA installation.
+Please add one more to proceed."""
+        click.echo(min_masters_message)
+    elif len(masters) >= 3:
+        ha_message = """
+NOTE: Multiple Masters specified, this will be an HA deployment with a separate
+etcd cluster. You will be prompted to provide the FQDN of a load balancer once
+finished entering hosts."""
+        click.echo(ha_message)
+
+        dedicated_nodes_message = """
+WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
+Nodes are specified, each configured Master will be marked as a schedulable
+Node."""
+
+        min_ha_nodes_message = """
+WARNING: A minimum of 3 dedicated Nodes are recommended for an HA
+deployment."""
+        if len(dedicated_nodes) == 0:
+            click.echo(dedicated_nodes_message)
+        elif len(dedicated_nodes) < 3:
+            click.echo(min_ha_nodes_message)
+
+    click.echo('')
+
+
+def print_host_summary(all_hosts, host):
+    click.echo("- %s" % host.connect_to)
+    if host.master:
+        click.echo("  - OpenShift Master")
+    if host.node:
+        if host.is_dedicated_node():
+            click.echo("  - OpenShift Node (Dedicated)")
+        elif host.is_schedulable_node(all_hosts):
+            click.echo("  - OpenShift Node")
+        else:
+            click.echo("  - OpenShift Node (Unscheduled)")
+    if host.master_lb:
+        if host.preconfigured:
+            click.echo("  - Load Balancer (Preconfigured)")
+        else:
+            click.echo("  - Load Balancer (HAProxy)")
+    if host.master:
+        if host.is_etcd_member(all_hosts):
+            click.echo("  - Etcd Member")
+        else:
+            click.echo("  - Etcd (Embedded)")
+
+
+def collect_master_lb(hosts):
+    """
+    Get a valid load balancer from the user and append it to the list of
+    hosts.
+
+    Ensure user does not specify a system already used as a master/node as
+    this is an invalid configuration.
+    """
+    message = """
+Setting up High Availability Masters requires a load balancing solution.
+Please provide a the FQDN of a host that will be configured as a proxy. This
+can be either an existing load balancer configured to balance all masters on
+port 8443 or a new host that will have HAProxy installed on it.
+
+If the host provided does is not yet configured, a reference haproxy load
+balancer will be installed. It's important to note that while the rest of the
+environment will be fault tolerant this reference load balancer will not be.
+It can be replaced post-installation with a load balancer with the same
+hostname.
+"""
+    click.echo(message)
+    host_props = {}
+
+    # Using an embedded function here so we have access to the hosts list:
+    def validate_prompt_lb(hostname):
+        # Run the standard hostname check first:
+        hostname = validate_prompt_hostname(hostname)
+
+        # Make sure this host wasn't already specified:
+        for host in hosts:
+            if host.connect_to == hostname and (host.master or host.node):
+                raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
+                                         'please specify a separate host' % hostname)
+        return hostname
+
+    host_props['connect_to'] = click.prompt('Enter hostname or IP address',
+                                            value_proc=validate_prompt_lb)
+    install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
+    host_props['preconfigured'] = not install_haproxy
+    host_props['master'] = False
+    host_props['node'] = False
+    host_props['master_lb'] = True
+    master_lb = Host(**host_props)
+    hosts.append(master_lb)
+
 def confirm_hosts_facts(oo_cfg, callback_facts):
     hosts = oo_cfg.hosts
     click.clear()
@@ -168,6 +316,8 @@ Notes:
     default_facts_lines = []
     default_facts = {}
     for h in hosts:
+        if h.preconfigured == True:
+            continue
         default_facts[h.connect_to] = {}
         h.ip = callback_facts[h.connect_to]["common"]["ip"]
         h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
+""" + click.echo(message) + host_props = {} + + # Using an embedded function here so we have access to the hosts list: + def validate_prompt_lb(hostname): + # Run the standard hostname check first: + hostname = validate_prompt_hostname(hostname) + + # Make sure this host wasn't already specified: + for host in hosts: + if host.connect_to == hostname and (host.master or host.node): + raise click.BadParameter('Cannot re-use "%s" as a load balancer, ' + 'please specify a separate host' % hostname) + return hostname + + host_props['connect_to'] = click.prompt('Enter hostname or IP address', + value_proc=validate_prompt_lb) + install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?') + host_props['preconfigured'] = not install_haproxy + host_props['master'] = False + host_props['node'] = False + host_props['master_lb'] = True + master_lb = Host(**host_props) + hosts.append(master_lb) + def confirm_hosts_facts(oo_cfg, callback_facts): hosts = oo_cfg.hosts click.clear() @@ -168,6 +316,8 @@ Notes: default_facts_lines = [] default_facts = {} for h in hosts: + if h.preconfigured == True: + continue default_facts[h.connect_to] = {} h.ip = callback_facts[h.connect_to]["common"]["ip"] h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"] @@ -198,7 +348,50 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended sys.exit(0) return default_facts -def get_variant_and_version(): + + +def check_hosts_config(oo_cfg, unattended): + click.clear() + masters = [host for host in oo_cfg.hosts if host.master] + + if len(masters) == 2: + click.echo("A minimum of 3 Masters are required for HA deployments.") + sys.exit(1) + + if len(masters) > 1: + master_lb = [host for host in oo_cfg.hosts if host.master_lb] + if len(master_lb) > 1: + click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.') + sys.exit(1) + elif len(master_lb) == 1: + if master_lb[0].master or master_lb[0].node: + click.echo('ERROR: The Master load balancer is configured as a master or node. Please correct this.') + sys.exit(1) + else: + message = """ +ERROR: No master load balancer specified in config. You must provide the FQDN +of a load balancer to balance the API (port 8443) on all Master hosts. + +https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters +""" + click.echo(message) + sys.exit(1) + + dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master] + if len(dedicated_nodes) == 0: + message = """ +WARNING: No dedicated Nodes specified. By default, colocated Masters have +their Nodes set to unschedulable. If you proceed all nodes will be labelled +as schedulable. 
+""" + if unattended: + click.echo(message) + else: + confirm_continue(message) + + return + +def get_variant_and_version(multi_master=False): message = "\nWhich variant would you like to install?\n\n" i = 1 @@ -207,15 +400,19 @@ def get_variant_and_version(): message = "%s\n(%s) %s %s" % (message, i, variant.description, version.name) i = i + 1 + message = "%s\n" % message click.echo(message) + if multi_master: + click.echo('NOTE: 3.0 installations are not') response = click.prompt("Choose a variant from above: ", default=1) product, version = combos[response - 1] return product, version def confirm_continue(message): - click.echo(message) + if message: + click.echo(message) click.confirm("Are you ready to continue?", default=False, abort=True) return @@ -290,27 +487,27 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user() click.clear() - if not oo_cfg.hosts: - oo_cfg.hosts = collect_hosts() - click.clear() - if oo_cfg.settings.get('variant', '') == '': variant, version = get_variant_and_version() oo_cfg.settings['variant'] = variant.name oo_cfg.settings['variant_version'] = version.name click.clear() + if not oo_cfg.hosts: + oo_cfg.hosts = collect_hosts(version=oo_cfg.settings['variant_version']) + click.clear() + return oo_cfg def collect_new_nodes(): click.clear() - click.echo('***New Node Configuration***') + click.echo('*** New Node Configuration ***') message = """ Add new nodes here """ click.echo(message) - return collect_hosts(True) + return collect_hosts(masters_set=True, print_summary=False) def get_installed_hosts(hosts, callback_facts): installed_hosts = [] @@ -336,7 +533,8 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): # This check has to happen before we start removing hosts later in this method if not force: if not unattended: - click.echo('By default the installer only adds new nodes to an installed environment.') + click.echo('By default the installer only adds new nodes ' \ + 'to an installed environment.') response = click.prompt('Do you want to (1) only add additional nodes or ' \ '(2) reinstall the existing hosts ' \ 'potentially erasing any custom changes?', @@ -373,8 +571,8 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): else: if unattended: if not force: - click.echo('Installed environment detected and no additional nodes specified: ' \ - 'aborting. If you want a fresh install, use ' \ + click.echo('Installed environment detected and no additional ' \ + 'nodes specified: aborting. If you want a fresh install, use ' \ '`atomic-openshift-installer install --force`') sys.exit(1) else: @@ -388,8 +586,8 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): click.echo('Gathering information from hosts...') callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose) if error: - click.echo("There was a problem fetching the required information. " \ - "See {} for details.".format(oo_cfg.settings['ansible_log_path'])) + click.echo("There was a problem fetching the required information. 
See " \ + "{} for details.".format(oo_cfg.settings['ansible_log_path'])) sys.exit(1) else: pass # proceeding as normal should do a clean install @@ -430,10 +628,12 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): @click.option('-v', '--verbose', is_flag=True, default=False) #pylint: disable=too-many-arguments +#pylint: disable=line-too-long # Main CLI entrypoint, not much we can do about too many arguments. def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose): """ - atomic-openshift-installer makes the process for installing OSE or AEP easier by interactively gathering the data needed to run on each host. + atomic-openshift-installer makes the process for installing OSE or AEP + easier by interactively gathering the data needed to run on each host. It can also be run in unattended mode if provided with a configuration file. Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html @@ -445,7 +645,11 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf ctx.obj['ansible_log_path'] = ansible_log_path ctx.obj['verbose'] = verbose - oo_cfg = OOConfig(ctx.obj['configuration']) + try: + oo_cfg = OOConfig(ctx.obj['configuration']) + except OOConfigInvalidHostError as e: + click.echo(e) + sys.exit(1) # If no playbook dir on the CLI, check the config: if not ansible_playbook_directory: @@ -460,7 +664,8 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf if ctx.obj['ansible_config']: oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config'] - elif os.path.exists(DEFAULT_ANSIBLE_CONFIG): + elif 'ansible_config' not in oo_cfg.settings and \ + os.path.exists(DEFAULT_ANSIBLE_CONFIG): # If we're installed by RPM this file should exist and we can use it as our default: oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG @@ -477,7 +682,7 @@ def uninstall(ctx): verbose = ctx.obj['verbose'] if len(oo_cfg.hosts) == 0: - click.echo("No hosts defined in: %s" % oo_cfg['configuration']) + click.echo("No hosts defined in: %s" % oo_cfg.config_path) sys.exit(1) click.echo("OpenShift will be uninstalled from the following hosts:\n") @@ -545,6 +750,9 @@ def install(ctx, force): else: oo_cfg = get_missing_info_from_user(oo_cfg) + check_hosts_config(oo_cfg, ctx.obj['unattended']) + + print_installation_summary(oo_cfg.hosts) click.echo('Gathering information from hosts...') callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose) @@ -570,8 +778,8 @@ def install(ctx, force): click.echo('Ready to run installation process.') message = """ -If changes are needed to the values recorded by the installer please update {}. -""".format(oo_cfg.config_path) +If changes are needed please edit the config file above and re-run. 
+""" if not ctx.obj['unattended']: confirm_continue(message) diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index 9c97e6e93..031b82bc1 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -14,7 +14,8 @@ PERSIST_SETTINGS = [ 'variant_version', 'version', ] -REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname'] +DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname'] +PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname'] class OOConfigFileError(Exception): @@ -36,19 +37,24 @@ class Host(object): self.public_ip = kwargs.get('public_ip', None) self.public_hostname = kwargs.get('public_hostname', None) self.connect_to = kwargs.get('connect_to', None) + self.preconfigured = kwargs.get('preconfigured', None) # Should this host run as an OpenShift master: self.master = kwargs.get('master', False) # Should this host run as an OpenShift node: self.node = kwargs.get('node', False) + + # Should this host run as an HAProxy: + self.master_lb = kwargs.get('master_lb', False) + self.containerized = kwargs.get('containerized', False) if self.connect_to is None: - raise OOConfigInvalidHostError("You must specify either and 'ip' " \ - "or 'hostname' to connect to.") + raise OOConfigInvalidHostError("You must specify either an ip " \ + "or hostname as 'connect_to'") - if self.master is False and self.node is False: + if self.master is False and self.node is False and self.master_lb is False: raise OOConfigInvalidHostError( "You must specify each host as either a master or a node.") @@ -62,12 +68,38 @@ class Host(object): """ Used when exporting to yaml. """ d = {} for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', - 'master', 'node', 'containerized', 'connect_to']: + 'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']: # If the property is defined (not None or False), export it: if getattr(self, prop): d[prop] = getattr(self, prop) return d + def is_etcd_member(self, all_hosts): + """ Will this host be a member of a standalone etcd cluster. """ + if not self.master: + return False + masters = [host for host in all_hosts if host.master] + if len(masters) > 1: + return True + return False + + def is_dedicated_node(self): + """ Will this host be a dedicated node. (not a master) """ + return self.node and not self.master + + def is_schedulable_node(self, all_hosts): + """ Will this host be a node marked as schedulable. 
""" + if not self.node: + return False + if not self.master: + return True + + masters = [host for host in all_hosts if host.master] + nodes = [host for host in all_hosts if host.node] + if len(masters) == len(nodes): + return True + return False + class OOConfig(object): default_dir = os.path.normpath( @@ -177,7 +209,12 @@ class OOConfig(object): for host in self.hosts: missing_facts = [] - for required_fact in REQUIRED_FACTS: + if host.preconfigured: + required_facts = PRECONFIGURED_REQUIRED_FACTS + else: + required_facts = DEFAULT_REQUIRED_FACTS + + for required_fact in required_facts: if not getattr(host, required_fact): missing_facts.append(required_fact) if len(missing_facts) > 0: diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 372f27bda..fd2cd7fbd 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -17,14 +17,17 @@ def set_config(cfg): def generate_inventory(hosts): global CFG + masters = [host for host in hosts if host.master] + nodes = [host for host in hosts if host.node] + proxy = determine_proxy_configuration(hosts) + multiple_masters = len(masters) > 1 base_inventory_path = CFG.settings['ansible_inventory_path'] base_inventory = open(base_inventory_path, 'w') - base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n') - base_inventory.write('\n[OSEv3:vars]\n') - base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user'])) - if CFG.settings['ansible_ssh_user'] != 'root': - base_inventory.write('ansible_become=true\n') + + write_inventory_children(base_inventory, multiple_masters, proxy) + + write_inventory_vars(base_inventory, multiple_masters, proxy) # Find the correct deployment type for ansible: ver = find_variant(CFG.settings['variant'], @@ -45,24 +48,66 @@ def generate_inventory(hosts): "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO'])) base_inventory.write('\n[masters]\n') - masters = (host for host in hosts if host.master) for master in masters: write_host(master, base_inventory) + + if len(masters) > 1: + base_inventory.write('\n[etcd]\n') + for master in masters: + write_host(master, base_inventory) + base_inventory.write('\n[nodes]\n') - nodes = (host for host in hosts if host.node) + for node in nodes: - # TODO: Until the Master can run the SDN itself we have to configure the Masters - # as Nodes too. 
@@ -76,8 +121,16 @@ def write_host(host, inventory, scheduleable=True):
         facts += ' openshift_public_hostname={}'.format(host.public_hostname)
     # TODO: For not write_host is handles both master and nodes.
     # Technically only nodes will ever need this.
-    if not scheduleable:
-        facts += ' openshift_scheduleable=False'
+
+    # Distinguish between three states, no schedulability specified (use default),
+    # explicitly set to True, or explicitly set to False:
+    if schedulable is None:
+        pass
+    elif schedulable:
+        facts += ' openshift_schedulable=True'
+    elif not schedulable:
+        facts += ' openshift_schedulable=False'
+
     installer_host = socket.gethostname()
     if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
         facts += ' ansible_connection=local'
@@ -104,9 +157,15 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
     status = subprocess.call(args, env=env_vars, stdout=FNULL)
     if not status == 0:
         return [], 1
-    callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r')
-    callback_facts = yaml.load(callback_facts_file)
-    callback_facts_file.close()
+
+    with open(CFG.settings['ansible_callback_facts_yaml'], 'r') as callback_facts_file:
+        try:
+            callback_facts = yaml.safe_load(callback_facts_file)
+        except yaml.YAMLError, exc:
+            print "Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc
+            print "Try deleting and rerunning the atomic-openshift-installer"
+            sys.exit(1)
+
     return callback_facts, 0
 
@@ -118,6 +177,7 @@ def default_facts(hosts, verbose=False):
     facts_env = os.environ.copy()
     facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
     facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
+    facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
     if 'ansible_log_path' in CFG.settings:
        facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
     if 'ansible_config' in CFG.settings:
@@ -130,10 +190,10 @@ def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
     inventory_file = generate_inventory(hosts_to_run_on)
     if len(hosts_to_run_on) != len(hosts):
         main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
-                                          'playbooks/common/openshift-cluster/scaleup.yml')
+                                          'playbooks/byo/openshift-cluster/scaleup.yml')
     else:
         main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
-                                          'playbooks/byo/config.yml')
+                                          'playbooks/byo/openshift-cluster/config.yml')
     facts_env = os.environ.copy()
     if 'ansible_log_path' in CFG.settings:
         facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
@@ -176,4 +236,3 @@ def run_upgrade_playbook(verbose=False):
     if 'ansible_config' in CFG.settings:
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
     return run_ansible(playbook, inventory_file, facts_env, verbose)
-
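write_host now treats schedulability as a tri-state: None omits the variable entirely (deferring to Ansible fact defaults), while True/False emit it explicitly. A sketch of the inventory line this produces (hypothetical helper following the same convention):

    def inventory_line(name, schedulable=None):
        # None -> bare hostname; True/False -> explicit openshift_schedulable fact.
        if schedulable is None:
            return name
        return '{} openshift_schedulable={}'.format(name, schedulable)

    assert inventory_line('10.0.0.4') == '10.0.0.4'
    assert inventory_line('10.0.0.1', False) == '10.0.0.1 openshift_schedulable=False'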
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index 3bb61dddb..571025543 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -30,14 +30,14 @@ class Variant(object):
         self.versions = versions
 
     def latest_version(self):
-        return self.versions[-1]
+        return self.versions[0]
 
 
 # WARNING: Keep the versions ordered, most recent last:
 OSE = Variant('openshift-enterprise', 'OpenShift Enterprise',
     [
-        Version('3.0', 'enterprise'),
-        Version('3.1', 'openshift-enterprise')
+        Version('3.1', 'openshift-enterprise'),
+        Version('3.0', 'enterprise')
     ]
 )
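The variants change flips the list so the newest version comes first, which is why latest_version must move from versions[-1] to versions[0] in the same commit. A minimal sketch of the invariant (illustrative strings in place of Version objects):

    versions = ['3.1', '3.0']        # most recent first, per the reordering above

    def latest_version(versions):
        return versions[0]           # was versions[-1] under the old ordering

    assert latest_version(versions) == '3.1'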
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index fc16d9ceb..1da49c807 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -5,12 +5,10 @@ import copy
 import os
 import ConfigParser
-import yaml
 
 import ooinstall.cli_installer as cli
-from click.testing import CliRunner
-from test.oo_config_tests import OOInstallFixture
+from test.fixture import OOCliFixture, SAMPLE_CONFIG, build_input, read_yaml
 from mock import patch
 
@@ -41,8 +39,67 @@ MOCK_FACTS = {
     },
 }
 
-# Substitute in a product name before use:
-SAMPLE_CONFIG = """
+MOCK_FACTS_QUICKHA = {
+    '10.0.0.1': {
+        'common': {
+            'ip': '10.0.0.1',
+            'public_ip': '10.0.0.1',
+            'hostname': 'master-private.example.com',
+            'public_hostname': 'master.example.com'
+        }
+    },
+    '10.0.0.2': {
+        'common': {
+            'ip': '10.0.0.2',
+            'public_ip': '10.0.0.2',
+            'hostname': 'node1-private.example.com',
+            'public_hostname': 'node1.example.com'
+        }
+    },
+    '10.0.0.3': {
+        'common': {
+            'ip': '10.0.0.3',
+            'public_ip': '10.0.0.3',
+            'hostname': 'node2-private.example.com',
+            'public_hostname': 'node2.example.com'
+        }
+    },
+    '10.0.0.4': {
+        'common': {
+            'ip': '10.0.0.4',
+            'public_ip': '10.0.0.4',
+            'hostname': 'proxy-private.example.com',
+            'public_hostname': 'proxy.example.com'
+        }
+    },
+}
+
+# Missing connect_to on some hosts:
+BAD_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+  - connect_to: 10.0.0.1
+    ip: 10.0.0.1
+    hostname: master-private.example.com
+    public_ip: 24.222.0.1
+    public_hostname: master.example.com
+    master: true
+    node: true
+  - ip: 10.0.0.2
+    hostname: node1-private.example.com
+    public_ip: 24.222.0.2
+    public_hostname: node1.example.com
+    node: true
+  - connect_to: 10.0.0.3
+    ip: 10.0.0.3
+    hostname: node2-private.example.com
+    public_ip: 24.222.0.3
+    public_hostname: node2.example.com
+    node: true
+"""
+
+QUICKHA_CONFIG = """
 variant: %s
 ansible_ssh_user: root
 hosts:
- self.cli_args = ["-a", self.work_dir] - - def run_cli(self): - return self.runner.invoke(cli.cli, self.cli_args) - - def assert_result(self, result, exit_code): - if result.exception is not None or result.exit_code != exit_code: - print "Unexpected result from CLI execution" - print "Exit code: %s" % result.exit_code - print "Exception: %s" % result.exception - print result.exc_info - import traceback - traceback.print_exception(*result.exc_info) - print "Output:\n%s" % result.output - self.fail("Exception during CLI execution") - - def _read_yaml(self, config_file_path): - f = open(config_file_path, 'r') - config = yaml.safe_load(f.read()) - f.close() - return config - - def _verify_load_facts(self, load_facts_mock): - """ Check that we ran load facts with expected inputs. """ - load_facts_args = load_facts_mock.call_args[0] - self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"), - load_facts_args[0]) - self.assertEquals(os.path.join(self.work_dir, - "playbooks/byo/openshift_facts.yml"), load_facts_args[1]) - env_vars = load_facts_args[2] - self.assertEquals(os.path.join(self.work_dir, - '.ansible/callback_facts.yaml'), - env_vars['OO_INSTALL_CALLBACK_FACTS_YAML']) - self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH']) - - def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len): - """ Check that we ran playbook with expected inputs. """ - hosts = run_playbook_mock.call_args[0][0] - hosts_to_run_on = run_playbook_mock.call_args[0][1] - self.assertEquals(exp_hosts_len, len(hosts)) - self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) - - def _verify_config_hosts(self, written_config, host_count): - print written_config['hosts'] - self.assertEquals(host_count, len(written_config['hosts'])) - for h in written_config['hosts']: - self.assertTrue(h['node']) - self.assertTrue('ip' in h) - self.assertTrue('hostname' in h) - self.assertTrue('public_ip' in h) - self.assertTrue('public_hostname' in h) - - #pylint: disable=too-many-arguments - def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock, - run_playbook_mock, cli_input, - exp_hosts_len=None, exp_hosts_to_run_on_len=None, - force=None): - """ - Tests cli_installer.py:get_hosts_to_run_on. That method has quite a - few subtle branches in the logic. The goal with this method is simply - to handle all the messy stuff here and allow the main test cases to be - easily read. The basic idea is to modify mock_facts to return a - version indicating OpenShift is already installed on particular hosts. 
- """ - load_facts_mock.return_value = (mock_facts, 0) - run_playbook_mock.return_value = 0 - - if cli_input: - self.cli_args.append("install") - result = self.runner.invoke(cli.cli, - self.cli_args, - input=cli_input) - else: - config_file = self.write_config(os.path.join(self.work_dir, - 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') - - self.cli_args.extend(["-c", config_file, "install"]) - if force: - self.cli_args.append("--force") - result = self.runner.invoke(cli.cli, self.cli_args) - written_config = self._read_yaml(config_file) - self._verify_config_hosts(written_config, exp_hosts_len) +QUICKHA_CONFIG_REUSED_LB = """ +variant: %s +ansible_ssh_user: root +hosts: + - connect_to: 10.0.0.1 + ip: 10.0.0.1 + hostname: master-private.example.com + public_ip: 24.222.0.1 + public_hostname: master.example.com + master: true + node: true + - connect_to: 10.0.0.2 + ip: 10.0.0.2 + hostname: node1-private.example.com + public_ip: 24.222.0.2 + public_hostname: node1.example.com + master: true + node: true + master_lb: true + - connect_to: 10.0.0.3 + ip: 10.0.0.3 + hostname: node2-private.example.com + public_ip: 24.222.0.3 + public_hostname: node2.example.com + node: true + master: true +""" - self.assert_result(result, 0) - self._verify_load_facts(load_facts_mock) - self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len) +QUICKHA_CONFIG_NO_LB = """ +variant: %s +ansible_ssh_user: root +hosts: + - connect_to: 10.0.0.1 + ip: 10.0.0.1 + hostname: master-private.example.com + public_ip: 24.222.0.1 + public_hostname: master.example.com + master: true + node: true + - connect_to: 10.0.0.2 + ip: 10.0.0.2 + hostname: node1-private.example.com + public_ip: 24.222.0.2 + public_hostname: node1.example.com + master: true + node: true + - connect_to: 10.0.0.3 + ip: 10.0.0.3 + hostname: node2-private.example.com + public_ip: 24.222.0.3 + public_hostname: node2.example.com + node: true + master: true +""" - # Make sure we ran on the expected masters and nodes: - hosts = run_playbook_mock.call_args[0][0] - hosts_to_run_on = run_playbook_mock.call_args[0][1] - self.assertEquals(exp_hosts_len, len(hosts)) - self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) +QUICKHA_CONFIG_PRECONFIGURED_LB = """ +variant: %s +ansible_ssh_user: root +hosts: + - connect_to: 10.0.0.1 + ip: 10.0.0.1 + hostname: master-private.example.com + public_ip: 24.222.0.1 + public_hostname: master.example.com + master: true + node: true + - connect_to: 10.0.0.2 + ip: 10.0.0.2 + hostname: node1-private.example.com + public_ip: 24.222.0.2 + public_hostname: node1.example.com + master: true + node: true + - connect_to: 10.0.0.3 + ip: 10.0.0.3 + hostname: node2-private.example.com + public_ip: 24.222.0.3 + public_hostname: node2.example.com + node: true + master: true + - connect_to: 10.0.0.4 + ip: 10.0.0.4 + hostname: node3-private.example.com + public_ip: 24.222.0.4 + public_hostname: node3.example.com + node: true + - connect_to: proxy-private.example.com + hostname: proxy-private.example.com + public_hostname: proxy.example.com + master_lb: true + preconfigured: true +""" class UnattendedCliTests(OOCliFixture): @@ -284,7 +378,9 @@ class UnattendedCliTests(OOCliFixture): '.ansible/callback_facts.yaml'), env_vars['OO_INSTALL_CALLBACK_FACTS_YAML']) self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH']) - self.assertTrue('ANSIBLE_CONFIG' not in env_vars) + # If user running test has rpm installed, this might be set to default: + self.assertTrue('ANSIBLE_CONFIG' not in env_vars or + 
+
+    # unattended with bad config file and no installed hosts (without --force)
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_bad_config(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS, 0)
+        run_playbook_mock.return_value = 0
+
+        config_file = self.write_config(os.path.join(self.work_dir,
+            'ooinstall.conf'), BAD_CONFIG % 'openshift-enterprise')
+
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
+
+        self.assertEquals(1, result.exit_code)
+        self.assertTrue("You must specify either an ip or hostname"
+            in result.output)
+
+    #unattended with three masters, one node, and haproxy
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        config_file = self.write_config(os.path.join(self.work_dir,
+            'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise')
+
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
+        self.assert_result(result, 0)
+
+        # Make sure we ran on the expected masters and nodes:
+        hosts = run_playbook_mock.call_args[0][0]
+        hosts_to_run_on = run_playbook_mock.call_args[0][1]
+        self.assertEquals(5, len(hosts))
+        self.assertEquals(5, len(hosts_to_run_on))
+
+    #unattended with two masters, one node, and haproxy
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        config_file = self.write_config(os.path.join(self.work_dir,
+            'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
+
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
+
+        # This is an invalid config:
+        self.assert_result(result, 1)
+        self.assertTrue("A minimum of 3 Masters are required" in result.output)
+
+    #unattended with three masters, one node, but no load balancer specified:
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        config_file = self.write_config(os.path.join(self.work_dir,
+            'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
+
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
+
+        # This is not a valid input:
+        self.assert_result(result, 1)
+        self.assertTrue('No master load balancer specified in config' in result.output)
+
+    #unattended with three masters, one node, and one of the masters reused as load balancer:
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        config_file = self.write_config(os.path.join(self.work_dir,
+            'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
+
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
+
+        # This is not a valid configuration:
+        self.assert_result(result, 1)
+
+    #unattended with preconfigured lb
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_quick_ha_preconfigured_lb(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        config_file = self.write_config(os.path.join(self.work_dir,
+            'ooinstall.conf'), QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise')
+
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
+        self.assert_result(result, 0)
+
+        # Make sure we ran on the expected masters and nodes:
+        hosts = run_playbook_mock.call_args[0][0]
+        hosts_to_run_on = run_playbook_mock.call_args[0][1]
+        self.assertEquals(5, len(hosts))
+        self.assertEquals(5, len(hosts_to_run_on))
 
 class AttendedCliTests(OOCliFixture):
 
@@ -468,64 +674,13 @@ class AttendedCliTests(OOCliFixture):
         self.config_file = os.path.join(self.work_dir, 'config.yml')
         self.cli_args.extend(["-c", self.config_file])
 
-    #pylint: disable=too-many-arguments
-    def _build_input(self, ssh_user=None, hosts=None, variant_num=None,
-                     add_nodes=None, confirm_facts=None):
- """ - - inputs = [ - 'y', # let's proceed - ] - if ssh_user: - inputs.append(ssh_user) - - if hosts: - i = 0 - for (host, is_master) in hosts: - inputs.append(host) - inputs.append('y' if is_master else 'n') - #inputs.append('rpm') - if i < len(hosts) - 1: - inputs.append('y') # Add more hosts - else: - inputs.append('n') # Done adding hosts - i += 1 - - if variant_num: - inputs.append(str(variant_num)) # Choose variant + version - - # TODO: support option 2, fresh install - if add_nodes: - inputs.append('1') # Add more nodes - i = 0 - for (host, is_master) in add_nodes: - inputs.append(host) - inputs.append('y' if is_master else 'n') - #inputs.append('rpm') - if i < len(add_nodes) - 1: - inputs.append('y') # Add more hosts - else: - inputs.append('n') # Done adding hosts - i += 1 - - inputs.extend([ - confirm_facts, - 'y', # lets do this - ]) - - return '\n'.join(inputs) - @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_full_run(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 - cli_input = self._build_input(hosts=[ + cli_input = build_input(hosts=[ ('10.0.0.1', True), ('10.0.0.2', False), ('10.0.0.3', False)], @@ -540,9 +695,18 @@ class AttendedCliTests(OOCliFixture): self._verify_load_facts(load_facts_mock) self._verify_run_playbook(run_playbook_mock, 3, 3) - written_config = self._read_yaml(self.config_file) + written_config = read_yaml(self.config_file) self._verify_config_hosts(written_config, 3) + inventory = ConfigParser.ConfigParser(allow_no_value=True) + inventory.read(os.path.join(self.work_dir, '.ansible/hosts')) + self.assertEquals('False', + inventory.get('nodes', '10.0.0.1 openshift_schedulable')) + self.assertEquals(None, + inventory.get('nodes', '10.0.0.2')) + self.assertEquals(None, + inventory.get('nodes', '10.0.0.3')) + # interactive with config file and some installed some uninstalled hosts @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') @@ -557,7 +721,7 @@ class AttendedCliTests(OOCliFixture): load_facts_mock.return_value = (mock_facts, 0) run_playbook_mock.return_value = 0 - cli_input = self._build_input(hosts=[ + cli_input = build_input(hosts=[ ('10.0.0.1', True), ('10.0.0.2', False), ], @@ -574,7 +738,7 @@ class AttendedCliTests(OOCliFixture): self._verify_load_facts(load_facts_mock) self._verify_run_playbook(run_playbook_mock, 3, 2) - written_config = self._read_yaml(self.config_file) + written_config = read_yaml(self.config_file) self._verify_config_hosts(written_config, 3) @patch('ooinstall.openshift_ansible.run_main_playbook') @@ -586,7 +750,7 @@ class AttendedCliTests(OOCliFixture): config_file = self.write_config(os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') - cli_input = self._build_input(confirm_facts='y') + cli_input = build_input(confirm_facts='y') self.cli_args.extend(["-c", config_file]) self.cli_args.append("install") result = self.runner.invoke(cli.cli, @@ -597,7 +761,7 @@ class AttendedCliTests(OOCliFixture): self._verify_load_facts(load_facts_mock) self._verify_run_playbook(run_playbook_mock, 3, 3) - written_config = self._read_yaml(config_file) + written_config = read_yaml(config_file) self._verify_config_hosts(written_config, 3) #interactive with config file and all installed hosts @@ -608,12 +772,13 @@ class AttendedCliTests(OOCliFixture): mock_facts['10.0.0.1']['common']['version'] = "3.0.0" 
         mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
 
-        cli_input = self._build_input(hosts=[
+        cli_input = build_input(hosts=[
             ('10.0.0.1', True),
             ],
             add_nodes=[('10.0.0.2', False)],
             ssh_user='root',
             variant_num=1,
+            schedulable_masters_ok=True,
             confirm_facts='y')
 
         self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
@@ -623,6 +788,131 @@ class AttendedCliTests(OOCliFixture):
                                          exp_hosts_to_run_on_len=2,
                                          force=False)
 
+    #interactive multimaster: one more node than master
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        cli_input = build_input(hosts=[
+            ('10.0.0.1', True),
+            ('10.0.0.2', True),
+            ('10.0.0.3', True),
+            ('10.0.0.4', False)],
+            ssh_user='root',
+            variant_num=1,
+            confirm_facts='y',
+            master_lb=('10.0.0.5', False))
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli, self.cli_args,
+            input=cli_input)
+        self.assert_result(result, 0)
+
+        self._verify_load_facts(load_facts_mock)
+        self._verify_run_playbook(run_playbook_mock, 5, 5)
+
+        written_config = read_yaml(self.config_file)
+        self._verify_config_hosts(written_config, 5)
+
+        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+        self.assertEquals('False',
+            inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+        self.assertEquals('False',
+            inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
+        self.assertEquals('False',
+            inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
+        self.assertEquals(None,
+            inventory.get('nodes', '10.0.0.4'))
+
+        self.assertTrue(inventory.has_section('etcd'))
+        self.assertEquals(3, len(inventory.items('etcd')))
+
+    #interactive multimaster: identical masters and nodes
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        cli_input = build_input(hosts=[
+            ('10.0.0.1', True),
+            ('10.0.0.2', True),
+            ('10.0.0.3', True)],
+            ssh_user='root',
+            variant_num=1,
+            confirm_facts='y',
+            master_lb=('10.0.0.5', False))
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli, self.cli_args,
+            input=cli_input)
+        self.assert_result(result, 0)
+
+        self._verify_load_facts(load_facts_mock)
+        self._verify_run_playbook(run_playbook_mock, 4, 4)
+
+        written_config = read_yaml(self.config_file)
+        self._verify_config_hosts(written_config, 4)
+
+        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+        self.assertEquals('True',
+            inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+        self.assertEquals('True',
+            inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
+        self.assertEquals('True',
+            inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
+
+    #interactive multimaster: attempting to use a master as the load balancer should fail:
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+        run_playbook_mock.return_value = 0
+
+        cli_input = build_input(hosts=[
+            ('10.0.0.1', True),
+            ('10.0.0.2', True),
+            ('10.0.0.3', False),
+            ('10.0.0.4', True)],
+            ssh_user='root',
+            variant_num=1,
+            confirm_facts='y',
+            master_lb=(['10.0.0.2', '10.0.0.5'], False))
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli, self.cli_args,
+            input=cli_input)
+        self.assert_result(result, 0)
+
+    #interactive all-in-one
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_all_in_one(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS, 0)
+        run_playbook_mock.return_value = 0
+
+        cli_input = build_input(hosts=[
+            ('10.0.0.1', True)],
+            ssh_user='root',
+            variant_num=1,
+            confirm_facts='y')
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli, self.cli_args,
+            input=cli_input)
+        self.assert_result(result, 0)
+
+        self._verify_load_facts(load_facts_mock)
+        self._verify_run_playbook(run_playbook_mock, 1, 1)
+
+        written_config = read_yaml(self.config_file)
+        self._verify_config_hosts(written_config, 1)
+
+        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+        self.assertEquals('True',
+            inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+
     # TODO: test with config file, attended add node
     # TODO: test with config file, attended new node already in config file
     # TODO: test with config file, attended new node already in config file, plus manually added nodes
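A note on the inventory assertions in the tests above: ConfigParser(allow_no_value=True) happens to parse an Ansible INI inventory line such as "10.0.0.1 openshift_schedulable=False" as an option named "10.0.0.1 openshift_schedulable" with value 'False', and a bare hostname as an option with value None. A self-contained sketch of that trick (hypothetical inventory text, Python 2 as used by these tests):

    import ConfigParser
    from StringIO import StringIO

    INVENTORY = "[nodes]\n10.0.0.1 openshift_schedulable=False\n10.0.0.4\n"

    parser = ConfigParser.ConfigParser(allow_no_value=True)
    parser.readfp(StringIO(INVENTORY))
    assert parser.get('nodes', '10.0.0.1 openshift_schedulable') == 'False'
    assert parser.get('nodes', '10.0.0.4') is None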
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
new file mode 100644
index 000000000..90bd9e1ef
--- /dev/null
+++ b/utils/test/fixture.py
@@ -0,0 +1,221 @@
+# pylint: disable=missing-docstring
+import os
+import yaml
+
+import ooinstall.cli_installer as cli
+
+from test.oo_config_tests import OOInstallFixture
+from click.testing import CliRunner
+
+# Substitute in a product name before use:
+SAMPLE_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+  - connect_to: 10.0.0.1
+    ip: 10.0.0.1
+    hostname: master-private.example.com
+    public_ip: 24.222.0.1
+    public_hostname: master.example.com
+    master: true
+    node: true
+  - connect_to: 10.0.0.2
+    ip: 10.0.0.2
+    hostname: node1-private.example.com
+    public_ip: 24.222.0.2
+    public_hostname: node1.example.com
+    node: true
+  - connect_to: 10.0.0.3
+    ip: 10.0.0.3
+    hostname: node2-private.example.com
+    public_ip: 24.222.0.3
+    public_hostname: node2.example.com
+    node: true
+"""
+
+def read_yaml(config_file_path):
+    cfg_f = open(config_file_path, 'r')
+    config = yaml.safe_load(cfg_f.read())
+    cfg_f.close()
+    return config
+
+
+class OOCliFixture(OOInstallFixture):
+
+    def setUp(self):
+        OOInstallFixture.setUp(self)
+        self.runner = CliRunner()
+
+        # Add any arguments you would like to test here, the defaults ensure
+        # we only do unattended invocations here, and using temporary files/dirs.
+        self.cli_args = ["-a", self.work_dir]
+
+    def run_cli(self):
+        return self.runner.invoke(cli.cli, self.cli_args)
+
+    def assert_result(self, result, exit_code):
+        if result.exit_code != exit_code:
+            print "Unexpected result from CLI execution"
+            print "Exit code: %s" % result.exit_code
+            print "Exception: %s" % result.exception
+            print result.exc_info
+            import traceback
+            traceback.print_exception(*result.exc_info)
+            print "Output:\n%s" % result.output
+            self.fail("Exception during CLI execution")
+
+    def _verify_load_facts(self, load_facts_mock):
+        """ Check that we ran load facts with expected inputs. """
+        load_facts_args = load_facts_mock.call_args[0]
+        self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+            load_facts_args[0])
+        self.assertEquals(os.path.join(self.work_dir,
+            "playbooks/byo/openshift_facts.yml"),
+            load_facts_args[1])
+        env_vars = load_facts_args[2]
+        self.assertEquals(os.path.join(self.work_dir,
+            '.ansible/callback_facts.yaml'),
+            env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+        self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+    def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+        """ Check that we ran playbook with expected inputs. """
+        hosts = run_playbook_mock.call_args[0][0]
+        hosts_to_run_on = run_playbook_mock.call_args[0][1]
+        self.assertEquals(exp_hosts_len, len(hosts))
+        self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+    def _verify_config_hosts(self, written_config, host_count):
+        self.assertEquals(host_count, len(written_config['hosts']))
+        for host in written_config['hosts']:
+            self.assertTrue('hostname' in host)
+            self.assertTrue('public_hostname' in host)
+            if 'preconfigured' not in host:
+                self.assertTrue(host['node'])
+                self.assertTrue('ip' in host)
+                self.assertTrue('public_ip' in host)
""" + load_facts_args = load_facts_mock.call_args[0] + self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"), + load_facts_args[0]) + self.assertEquals(os.path.join(self.work_dir, + "playbooks/byo/openshift_facts.yml"), + load_facts_args[1]) + env_vars = load_facts_args[2] + self.assertEquals(os.path.join(self.work_dir, + '.ansible/callback_facts.yaml'), + env_vars['OO_INSTALL_CALLBACK_FACTS_YAML']) + self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH']) + + def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len): + """ Check that we ran playbook with expected inputs. """ + hosts = run_playbook_mock.call_args[0][0] + hosts_to_run_on = run_playbook_mock.call_args[0][1] + self.assertEquals(exp_hosts_len, len(hosts)) + self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) + + def _verify_config_hosts(self, written_config, host_count): + self.assertEquals(host_count, len(written_config['hosts'])) + for host in written_config['hosts']: + self.assertTrue('hostname' in host) + self.assertTrue('public_hostname' in host) + if 'preconfigured' not in host: + self.assertTrue(host['node']) + self.assertTrue('ip' in host) + self.assertTrue('public_ip' in host) + + #pylint: disable=too-many-arguments + def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock, + run_playbook_mock, cli_input, + exp_hosts_len=None, exp_hosts_to_run_on_len=None, + force=None): + """ + Tests cli_installer.py:get_hosts_to_run_on. That method has quite a + few subtle branches in the logic. The goal with this method is simply + to handle all the messy stuff here and allow the main test cases to be + easily read. The basic idea is to modify mock_facts to return a + version indicating OpenShift is already installed on particular hosts. + """ + load_facts_mock.return_value = (mock_facts, 0) + run_playbook_mock.return_value = 0 + + if cli_input: + self.cli_args.append("install") + result = self.runner.invoke(cli.cli, + self.cli_args, + input=cli_input) + else: + config_file = self.write_config( + os.path.join(self.work_dir, + 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') + + self.cli_args.extend(["-c", config_file, "install"]) + if force: + self.cli_args.append("--force") + result = self.runner.invoke(cli.cli, self.cli_args) + written_config = read_yaml(config_file) + self._verify_config_hosts(written_config, exp_hosts_len) + + self.assert_result(result, 0) + self._verify_load_facts(load_facts_mock) + self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len) + + # Make sure we ran on the expected masters and nodes: + hosts = run_playbook_mock.call_args[0][0] + hosts_to_run_on = run_playbook_mock.call_args[0][1] + self.assertEquals(exp_hosts_len, len(hosts)) + self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) + + +#pylint: disable=too-many-arguments,too-many-branches +def build_input(ssh_user=None, hosts=None, variant_num=None, + add_nodes=None, confirm_facts=None, schedulable_masters_ok=None, + master_lb=None): + """ + Build an input string simulating a user entering values in an interactive + attended install. + + This is intended to give us one place to update when the CLI prompts change. + We should aim to keep this dependent on optional keyword arguments with + sensible defaults to keep things from getting too fragile. 
+ """ + + inputs = [ + 'y', # let's proceed + ] + if ssh_user: + inputs.append(ssh_user) + + if variant_num: + inputs.append(str(variant_num)) # Choose variant + version + + num_masters = 0 + if hosts: + i = 0 + for (host, is_master) in hosts: + inputs.append(host) + if is_master: + inputs.append('y') + num_masters += 1 + else: + inputs.append('n') + #inputs.append('rpm') + # We should not be prompted to add more hosts if we're currently at + # 2 masters, this is an invalid HA configuration, so this question + # will not be asked, and the user must enter the next host: + if num_masters != 2: + if i < len(hosts) - 1: + if num_masters >= 1: + inputs.append('y') # Add more hosts + else: + inputs.append('n') # Done adding hosts + i += 1 + + # You can pass a single master_lb or a list if you intend for one to get rejected: + if master_lb: + if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple): + inputs.extend(master_lb[0]) + else: + inputs.append(master_lb[0]) + inputs.append('y' if master_lb[1] else 'n') + + # TODO: support option 2, fresh install + if add_nodes: + if schedulable_masters_ok: + inputs.append('y') + inputs.append('1') # Add more nodes + i = 0 + for (host, is_master) in add_nodes: + inputs.append(host) + #inputs.append('rpm') + if i < len(add_nodes) - 1: + inputs.append('y') # Add more hosts + else: + inputs.append('n') # Done adding hosts + i += 1 + + if add_nodes is None: + total_hosts = hosts + else: + total_hosts = hosts + add_nodes + if total_hosts is not None and num_masters == len(total_hosts): + inputs.append('y') + + inputs.extend([ + confirm_facts, + 'y', # lets do this + ]) + + return '\n'.join(inputs) + diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py index 0dd4a30e9..9f5f8e244 100644 --- a/utils/test/oo_config_tests.py +++ b/utils/test/oo_config_tests.py @@ -73,6 +73,29 @@ hosts: node: true """ +CONFIG_BAD = """ +variant: openshift-enterprise +ansible_ssh_user: root +hosts: + - connect_to: master-private.example.com + ip: 10.0.0.1 + hostname: master-private.example.com + public_ip: 24.222.0.1 + public_hostname: master.example.com + master: true + node: true + - ip: 10.0.0.2 + hostname: node1-private.example.com + public_ip: 24.222.0.2 + public_hostname: node1.example.com + node: true + - connect_to: node2-private.example.com + ip: 10.0.0.3 + hostname: node2-private.example.com + public_ip: 24.222.0.3 + public_hostname: node2.example.com + node: true +""" class OOInstallFixture(unittest.TestCase): @@ -161,6 +184,17 @@ class OOConfigTests(OOInstallFixture): self.assertEquals('openshift-enterprise', ooconfig.settings['variant']) self.assertEquals('v1', ooconfig.settings['version']) + def test_load_bad_config(self): + + cfg_path = self.write_config(os.path.join(self.work_dir, + 'ooinstall.conf'), CONFIG_BAD) + try: + OOConfig(cfg_path) + assert False + except OOConfigInvalidHostError: + assert True + + def test_load_complete_facts(self): cfg_path = self.write_config(os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG) |