Diffstat (limited to 'utils')
-rw-r--r-- | utils/.gitignore | 3
-rw-r--r-- | utils/Makefile | 83
-rw-r--r-- | utils/README.md | 41
-rw-r--r-- | utils/README.txt | 24
-rw-r--r-- | utils/docs/config.md | 57
-rw-r--r-- | utils/etc/ansible.cfg | 3
-rw-r--r-- | utils/src/ooinstall/__init__.py | 4
-rw-r--r-- | utils/src/ooinstall/ansible_plugins/facts_callback.py | 62
-rw-r--r-- | utils/src/ooinstall/cli_installer.py | 429
-rw-r--r-- | utils/src/ooinstall/oo_config.py | 181
-rw-r--r-- | utils/src/ooinstall/openshift_ansible.py | 194
-rw-r--r-- | utils/src/ooinstall/variants.py | 45
-rw-r--r-- | utils/test-requirements.txt | 11
-rw-r--r-- | utils/test/cli_installer_tests.py | 58
-rw-r--r-- | utils/test/fixture.py | 27
-rw-r--r-- | utils/test/oo_config_tests.py | 31
16 files changed, 736 insertions, 517 deletions
diff --git a/utils/.gitignore b/utils/.gitignore index 68759c0ba..facfeee54 100644 --- a/utils/.gitignore +++ b/utils/.gitignore @@ -43,3 +43,6 @@ coverage.xml # Sphinx documentation docs/_build/ +oo-install +oo-installenv +cover diff --git a/utils/Makefile b/utils/Makefile new file mode 100644 index 000000000..79c27626a --- /dev/null +++ b/utils/Makefile @@ -0,0 +1,83 @@ +######################################################## + +# Makefile for OpenShift: Atomic Quick Installer +# +# useful targets (not all implemented yet!): +# make clean -- Clean up garbage +# make ci ------------------- Execute CI steps (for travis or jenkins) + +######################################################## + +# > VARIABLE = value +# +# Normal setting of a variable - values within it are recursively +# expanded when the variable is USED, not when it's declared. +# +# > VARIABLE := value +# +# Setting of a variable with simple expansion of the values inside - +# values within it are expanded at DECLARATION time. + +######################################################## + + +NAME := oo-install +TESTPACKAGE := oo-install +SHORTNAME := ooinstall + +sdist: clean + python setup.py sdist + rm -fR $(SHORTNAME).egg-info + +clean: + @find . -type f -regex ".*\.py[co]$$" -delete + @find . -type f \( -name "*~" -or -name "#*" \) -delete + @rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install + @rm -fR $(NAME)env + +viewcover: + xdg-open cover/index.html + +virtualenv: + @echo "#############################################" + @echo "# Creating a virtualenv" + @echo "#############################################" + virtualenv $(NAME)env + . $(NAME)env/bin/activate && pip install setuptools==17.1.1 + . $(NAME)env/bin/activate && pip install -r test-requirements.txt +# If there are any special things to install do it here +# . $(NAME)env/bin/activate && INSTALL STUFF + +ci-unittests: + @echo "#############################################" + @echo "# Running Unit Tests in virtualenv" + @echo "#############################################" + . $(NAME)env/bin/activate && nosetests -v --with-coverage --cover-html --cover-min-percentage=70 --cover-package=$(SHORTNAME) test/ + @echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'" + +ci-pylint: + @echo "#############################################" + @echo "# Running PyLint Tests in virtualenv" + @echo "#############################################" + . $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py + +ci-list-deps: + @echo "#############################################" + @echo "# Listing all pip deps" + @echo "#############################################" + . $(NAME)env/bin/activate && pip freeze + +ci-pyflakes: + @echo "#################################################" + @echo "# Running Pyflakes Compliance Tests in virtualenv" + @echo "#################################################" + . $(NAME)env/bin/activate && pyflakes src/ooinstall/*.py + +ci-pep8: + @echo "#############################################" + @echo "# Running PEP8 Compliance Tests in virtualenv" + @echo "#############################################" + . 
$(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/ + +ci: clean virtualenv ci-list-deps ci-pep8 ci-pylint ci-pyflakes ci-unittests + : diff --git a/utils/README.md b/utils/README.md new file mode 100644 index 000000000..2abf2705e --- /dev/null +++ b/utils/README.md @@ -0,0 +1,41 @@ +# Running Tests (NEW) + +Run the command: + + make ci + +to run an array of unittests locally. + +You will get errors if the log files already exist and can not be +written to by the current user (`/tmp/ansible.log` and +`/tmp/installer.txt`). *We're working on it.* + +# Running From Source + +You will need to setup a **virtualenv** to run from source: + + $ virtualenv oo-install + $ source ./oo-install/bin/activate + $ virtualenv --relocatable ./oo-install/ + $ python setup.py install + +The virtualenv `bin` directory should now be at the start of your +`$PATH`, and `oo-install` is ready to use from your shell. + +You can exit the virtualenv with: + + $ deactivate + +# Testing (OLD) + +*This section is deprecated, but still works* + +First, run the **virtualenv setup steps** described above. + +Install some testing libraries: (we cannot do this via setuptools due to the version virtualenv bundles) + +$ pip install mock nose + +Then run the tests with: + +$ oo-install/bin/nosetests diff --git a/utils/README.txt b/utils/README.txt deleted file mode 100644 index 6a6a1d24d..000000000 --- a/utils/README.txt +++ /dev/null @@ -1,24 +0,0 @@ -## Running From Source - -You will need to setup a virtualenv to run from source and execute the unit tests. - -$ virtualenv oo-install -$ source ./oo-install/bin/activate -$ virtualenv --relocatable ./oo-install/ -$ python setup.py install - -The virtualenv bin directory should now be at the start of your $PATH, and oo-install is ready to use from your shell. 
- -You can exit the virtualenv with: - -$ deactivate - -## Testing - -Install some testing libraries: (we cannot do this via setuptools due to the version virtualenv bundles) - -$ pip install mock nose - -Then run the tests with: - -$ oo-install/bin/nosetests diff --git a/utils/docs/config.md b/utils/docs/config.md index 2729f8d37..3677ffe2e 100644 --- a/utils/docs/config.md +++ b/utils/docs/config.md @@ -7,31 +7,38 @@ The default location this config file will be written to ~/.config/openshift/ins ## Example ``` -version: v1 +version: v2 variant: openshift-enterprise -variant_version: 3.0 -ansible_ssh_user: root -hosts: -- ip: 10.0.0.1 - hostname: master-private.example.com - public_ip: 24.222.0.1 - public_hostname: master.example.com - master: true - node: true - containerized: true - connect_to: 24.222.0.1 -- ip: 10.0.0.2 - hostname: node1-private.example.com - public_ip: 24.222.0.2 - public_hostname: node1.example.com - node: true - connect_to: 10.0.0.2 -- ip: 10.0.0.3 - hostname: node2-private.example.com - public_ip: 24.222.0.3 - public_hostname: node2.example.com - node: true - connect_to: 10.0.0.3 +variant_version: 3.3 +deployment: + ansible_ssh_user: root + hosts: + - connect_to: 24.222.0.1 + ip: 10.0.0.1 + hostname: master-private.example.com + public_ip: 24.222.0.1 + public_hostname: master.example.com + roles: + - master + - node + containerized: true + - connect_to: 10.0.0.2 + ip: 10.0.0.2 + hostname: node1-private.example.com + public_ip: 24.222.0.2 + public_hostname: node1.example.com + roles: + - node + - connect_to: 10.0.0.3 + ip: 10.0.0.3 + hostname: node2-private.example.com + public_ip: 24.222.0.3 + public_hostname: node2.example.com + roles: + - node + roles: + master: + node: ``` ## Primary Settings @@ -76,5 +83,3 @@ Defines the user ansible will use to ssh to remote systems for gathering facts a ### ansible_log_path Default: /tmp/ansible.log - - diff --git a/utils/etc/ansible.cfg b/utils/etc/ansible.cfg index b7376ddfc..a53ab6cb1 100644 --- a/utils/etc/ansible.cfg +++ b/utils/etc/ansible.cfg @@ -16,6 +16,9 @@ log_path = /tmp/ansible.log forks = 10 host_key_checking = False nocows = 1 + +retry_files_enabled = False + # Need to handle: # inventory - derive from OO_ANSIBLE_DIRECTORY env var # callback_plugins - derive from pkg_resource.resource_filename diff --git a/utils/src/ooinstall/__init__.py b/utils/src/ooinstall/__init__.py index 944dea3b5..96e495e19 100644 --- a/utils/src/ooinstall/__init__.py +++ b/utils/src/ooinstall/__init__.py @@ -1,5 +1 @@ -# TODO: Temporarily disabled due to importing old code into openshift-ansible -# repo. We will work on these over time. 
# pylint: disable=missing-docstring - -from .oo_config import OOConfig diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py index ea6ed6574..e51890a22 100644 --- a/utils/src/ooinstall/ansible_plugins/facts_callback.py +++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py @@ -4,8 +4,11 @@ import os import yaml +from ansible.plugins.callback import CallbackBase -class CallbackModule(object): + +# pylint: disable=super-init-not-called +class CallbackModule(CallbackBase): def __init__(self): ###################### @@ -17,72 +20,75 @@ class CallbackModule(object): self.hosts_yaml_name = os.environ['OO_INSTALL_CALLBACK_FACTS_YAML'] except KeyError: raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment ' - 'variable must be set.') + 'variable must be set.') self.hosts_yaml = os.open(self.hosts_yaml_name, os.O_CREAT | - os.O_WRONLY) + os.O_WRONLY) - def on_any(self, *args, **kwargs): + def v2_on_any(self, *args, **kwargs): pass - def runner_on_failed(self, host, res, ignore_errors=False): + def v2_runner_on_failed(self, res, ignore_errors=False): pass - def runner_on_ok(self, host, res): - if res['invocation']['module_args'] == 'var=result': - facts = res['var']['result']['ansible_facts']['openshift'] + # pylint: disable=protected-access + def v2_runner_on_ok(self, res): + abridged_result = res._result.copy() + # Collect facts result from playbooks/byo/openshift_facts.yml + if 'result' in abridged_result: + facts = abridged_result['result']['ansible_facts']['openshift'] hosts_yaml = {} - hosts_yaml[host] = facts + hosts_yaml[res._host.get_name()] = facts os.write(self.hosts_yaml, yaml.safe_dump(hosts_yaml)) - def runner_on_skipped(self, host, item=None): + def v2_runner_on_skipped(self, res): pass - def runner_on_unreachable(self, host, res): + def v2_runner_on_unreachable(self, res): pass - def runner_on_no_hosts(self): + def v2_runner_on_no_hosts(self, task): pass - def runner_on_async_poll(self, host, res): + def v2_runner_on_async_poll(self, res): pass - def runner_on_async_ok(self, host, res): + def v2_runner_on_async_ok(self, res): pass - def runner_on_async_failed(self, host, res): + def v2_runner_on_async_failed(self, res): pass - def playbook_on_start(self): + def v2_playbook_on_start(self, playbook): pass - def playbook_on_notify(self, host, handler): + def v2_playbook_on_notify(self, res, handler): pass - def playbook_on_no_hosts_matched(self): + def v2_playbook_on_no_hosts_matched(self): pass - def playbook_on_no_hosts_remaining(self): + def v2_playbook_on_no_hosts_remaining(self): pass - def playbook_on_task_start(self, name, is_conditional): + def v2_playbook_on_task_start(self, name, is_conditional): pass - #pylint: disable=too-many-arguments - def playbook_on_vars_prompt(self, varname, private=True, prompt=None, - encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + # pylint: disable=too-many-arguments + def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, + encrypt=None, confirm=False, salt_size=None, salt=None, default=None): pass - def playbook_on_setup(self): + def v2_playbook_on_setup(self): pass - def playbook_on_import_for_host(self, host, imported_file): + def v2_playbook_on_import_for_host(self, res, imported_file): pass - def playbook_on_not_import_for_host(self, host, missing_file): + def v2_playbook_on_not_import_for_host(self, res, missing_file): pass - def playbook_on_play_start(self, name): + def v2_playbook_on_play_start(self, play): pass - 
def playbook_on_stats(self, stats): + def v2_playbook_on_stats(self, stats): pass diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index c9c13501d..2ba7efe3e 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -5,24 +5,58 @@ import os import re import sys -from distutils.version import LooseVersion +import logging import click +from pkg_resources import parse_version from ooinstall import openshift_ansible -from ooinstall import OOConfig +from ooinstall.oo_config import OOConfig from ooinstall.oo_config import OOConfigInvalidHostError from ooinstall.oo_config import Host, Role from ooinstall.variants import find_variant, get_variant_version_combos +installer_log = logging.getLogger('installer') +installer_log.setLevel(logging.CRITICAL) +installer_file_handler = logging.FileHandler('/tmp/installer.txt') +installer_file_handler.setFormatter( + logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')) +# Example output: +# 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts' +installer_file_handler.setLevel(logging.DEBUG) +installer_log.addHandler(installer_file_handler) + DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg' DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/' +UPGRADE_MAPPINGS = { + '3.0': { + 'minor_version': '3.0', + 'minor_playbook': 'v3_0_minor/upgrade.yml', + 'major_version': '3.1', + 'major_playbook': 'v3_0_to_v3_1/upgrade.yml', + }, + '3.1': { + 'minor_version': '3.1', + 'minor_playbook': 'v3_1_minor/upgrade.yml', + 'major_playbook': 'v3_1_to_v3_2/upgrade.yml', + 'major_version': '3.2', + }, + '3.2': { + 'minor_version': '3.2', + 'minor_playbook': 'v3_2/upgrade.yml', + 'major_playbook': 'v3_2/upgrade.yml', + 'major_version': '3.3', + } +} + + def validate_ansible_dir(path): if not path: - raise click.BadParameter('An ansible path must be provided') + raise click.BadParameter('An Ansible path must be provided') return path # if not os.path.exists(path)): # raise click.BadParameter("Path \"{}\" doesn't exist".format(path)) + def is_valid_hostname(hostname): if not hostname or len(hostname) > 255: return False @@ -31,59 +65,43 @@ def is_valid_hostname(hostname): allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) return all(allowed.match(x) for x in hostname.split(".")) + def validate_prompt_hostname(hostname): if hostname == '' or is_valid_hostname(hostname): return hostname raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.') + def get_ansible_ssh_user(): click.clear() message = """ -This installation process will involve connecting to remote hosts via ssh. Any -account may be used however if a non-root account is used it must have +This installation process involves connecting to remote hosts via ssh. Any +account may be used. However, if a non-root account is used, then it must have passwordless sudo access. """ click.echo(message) return click.prompt('User for ssh access', default='root') + def get_master_routingconfig_subdomain(): click.clear() message = """ -You might want to override the default subdomain uses for exposed routes. If you don't know what -this is, use the default value. +You might want to override the default subdomain used for exposed routes. If you don't know what this is, use the default value. 
""" click.echo(message) return click.prompt('New default subdomain (ENTER for none)', default='') + def list_hosts(hosts): hosts_idx = range(len(hosts)) for idx in hosts_idx: click.echo(' {}: {}'.format(idx, hosts[idx])) -def delete_hosts(hosts): - while True: - list_hosts(hosts) - del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \ - 'or n/N to add more hosts', default='n') - try: - del_idx = int(del_idx) - hosts.remove(hosts[del_idx]) - except IndexError: - click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx)) - except ValueError: - try: - response = del_idx.lower() - if response in ['y', 'n']: - return hosts, response - click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx)) - except AttributeError: - click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx)) - return hosts, None def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True): """ Collect host information from user. This will later be filled in using - ansible. + Ansible. Returns: a list of host information collected from the user """ @@ -92,28 +110,28 @@ def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=T message = """ You must now specify the hosts that will compose your OpenShift cluster. -Please enter an IP or hostname to connect to for each system in the cluster. -You will then be prompted to identify what role you would like this system to +Please enter an IP address or hostname to connect to for each system in the +cluster. You will then be prompted to identify what role you want this system to serve in the cluster. -OpenShift Masters serve the API and web console and coordinate the jobs to run -across the environment. If desired you can specify multiple Master systems for -an HA deployment, in which case you will be prompted to identify a *separate* -system to act as the load balancer for your cluster after all Masters and Nodes -are defined. +OpenShift masters serve the API and web console and coordinate the jobs to run +across the environment. Optionally, you can specify multiple master systems for +a high-availability (HA) deployment. If you choose an HA deployment, then you +are prompted to identify a *separate* system to act as the load balancer for +your cluster once you define all masters and nodes. -If only one Master is specified, an etcd instance embedded within the OpenShift -Master service will be used as the datastore. This can be later replaced with a -separate etcd instance if desired. If multiple Masters are specified, a -separate etcd cluster will be configured with each Master serving as a member. +If only one master is specified, an etcd instance is embedded within the +OpenShift master service to use as the datastore. This can be later replaced +with a separate etcd instance, if required. If multiple masters are specified, +then a separate etcd cluster is configured with each master serving as a member. -Any Masters configured as part of this installation process will also be -configured as Nodes. This is so that the Master will be able to proxy to Pods -from the API. By default this Node will be unschedulable but this can be changed -after installation with 'oadm manage-node'. +Any masters configured as part of this installation process are also +configured as nodes. This enables the master to proxy to pods +from the API. By default, this node is unschedulable, but this can be changed +after installation with the 'oadm manage-node' command. 
-OpenShift Nodes provide the runtime environments for containers. They will -host the required services to be managed by the Master. +OpenShift nodes provide the runtime environments for containers. They host the +required services to be managed by the master. http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node @@ -121,7 +139,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen click.echo(message) hosts = [] - roles = set(['master', 'node', 'storage']) + roles = set(['master', 'node', 'storage', 'etcd']) more_hosts = True num_masters = 0 while more_hosts: @@ -131,8 +149,9 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen value_proc=validate_prompt_hostname) if not masters_set: - if click.confirm('Will this host be an OpenShift Master?'): + if click.confirm('Will this host be an OpenShift master?'): host_props['roles'].append('master') + host_props['roles'].append('etcd') num_masters += 1 if oo_cfg.settings['variant_version'] == '3.0': @@ -154,12 +173,11 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen hosts.append(host) - if print_summary: print_installation_summary(hosts, oo_cfg.settings['variant_version']) # If we have one master, this is enough for an all-in-one deployment, - # thus we can start asking if you wish to proceed. Otherwise we assume + # thus we can start asking if you want to proceed. Otherwise we assume # you must. if masters_set or num_masters != 2: more_hosts = click.confirm('Do you want to add additional hosts?') @@ -192,12 +210,12 @@ def print_installation_summary(hosts, version=None): nodes = [host for host in hosts if host.is_node()] dedicated_nodes = [host for host in hosts if host.is_node() and not host.is_master()] click.echo('') - click.echo('Total OpenShift Masters: %s' % len(masters)) - click.echo('Total OpenShift Nodes: %s' % len(nodes)) + click.echo('Total OpenShift masters: %s' % len(masters)) + click.echo('Total OpenShift nodes: %s' % len(nodes)) if len(masters) == 1 and version != '3.0': ha_hint_message = """ -NOTE: Add a total of 3 or more Masters to perform an HA installation.""" +NOTE: Add a total of 3 or more masters to perform an HA installation.""" click.echo(ha_hint_message) elif len(masters) == 2: min_masters_message = """ @@ -206,19 +224,19 @@ Please add one more to proceed.""" click.echo(min_masters_message) elif len(masters) >= 3: ha_message = """ -NOTE: Multiple Masters specified, this will be an HA deployment with a separate +NOTE: Multiple masters specified, this will be an HA deployment with a separate etcd cluster. You will be prompted to provide the FQDN of a load balancer and a host for storage once finished entering hosts. """ click.echo(ha_message) dedicated_nodes_message = """ -WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated -Nodes are specified, each configured Master will be marked as a schedulable -Node.""" +WARNING: Dedicated nodes are recommended for an HA deployment. 
If no dedicated +nodes are specified, each configured master will be marked as a schedulable +node.""" min_ha_nodes_message = """ -WARNING: A minimum of 3 dedicated Nodes are recommended for an HA +WARNING: A minimum of 3 dedicated nodes are recommended for an HA deployment.""" if len(dedicated_nodes) == 0: click.echo(dedicated_nodes_message) @@ -231,14 +249,14 @@ deployment.""" def print_host_summary(all_hosts, host): click.echo("- %s" % host.connect_to) if host.is_master(): - click.echo(" - OpenShift Master") + click.echo(" - OpenShift master") if host.is_node(): if host.is_dedicated_node(): - click.echo(" - OpenShift Node (Dedicated)") + click.echo(" - OpenShift node (Dedicated)") elif host.is_schedulable_node(all_hosts): - click.echo(" - OpenShift Node") + click.echo(" - OpenShift node") else: - click.echo(" - OpenShift Node (Unscheduled)") + click.echo(" - OpenShift node (Unscheduled)") if host.is_master_lb(): if host.preconfigured: click.echo(" - Load Balancer (Preconfigured)") @@ -262,14 +280,14 @@ def collect_master_lb(hosts): this is an invalid configuration. """ message = """ -Setting up High Availability Masters requires a load balancing solution. -Please provide a the FQDN of a host that will be configured as a proxy. This +Setting up high-availability masters requires a load balancing solution. +Please provide the FQDN of a host that will be configured as a proxy. This can be either an existing load balancer configured to balance all masters on port 8443 or a new host that will have HAProxy installed on it. -If the host provided does is not yet configured, a reference haproxy load -balancer will be installed. It's important to note that while the rest of the -environment will be fault tolerant this reference load balancer will not be. +If the host provided is not yet configured, a reference HAProxy load +balancer will be installed. It's important to note that while the rest of the +environment will be fault-tolerant, this reference load balancer will not be. It can be replaced post-installation with a load balancer with the same hostname. """ @@ -291,19 +309,20 @@ hostname. host_props['connect_to'] = click.prompt('Enter hostname or IP address', value_proc=validate_prompt_lb) install_haproxy = \ - click.confirm('Should the reference haproxy load balancer be installed on this host?') + click.confirm('Should the reference HAProxy load balancer be installed on this host?') host_props['preconfigured'] = not install_haproxy host_props['roles'] = ['master_lb'] master_lb = Host(**host_props) hosts.append(master_lb) + def collect_storage_host(hosts): """ Get a valid host for storage from the user and append it to the list of hosts. """ message = """ -Setting up High Availability Masters requires a storage host. Please provide a +Setting up high-availability masters requires a storage host. Please provide a host that will be configured as a Registry Storage. Note: Containerized storage hosts are not currently supported. @@ -314,20 +333,19 @@ Note: Containerized storage hosts are not currently supported. 
first_master = next(host for host in hosts if host.is_master()) hostname_or_ip = click.prompt('Enter hostname or IP address', - value_proc=validate_prompt_hostname, - default=first_master.connect_to) + value_proc=validate_prompt_hostname, + default=first_master.connect_to) existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts) if existing and existing_host.is_node(): existing_host.roles.append('storage') else: host_props['connect_to'] = hostname_or_ip host_props['preconfigured'] = False - host_props['master'] = False - host_props['node'] = False - host_props['storage'] = True + host_props['roles'] = ['storage'] storage = Host(**host_props) hosts.append(storage) + def is_host_already_node_or_master(hostname, hosts): is_existing = False existing_host = None @@ -339,19 +357,20 @@ def is_host_already_node_or_master(hostname, hosts): return is_existing, existing_host + def confirm_hosts_facts(oo_cfg, callback_facts): hosts = oo_cfg.deployment.hosts click.clear() message = """ -A list of the facts gathered from the provided hosts follows. Because it is -often the case that the hostname for a system inside the cluster is different -from the hostname that is resolveable from command line or web clients -these settings cannot be validated automatically. +The following is a list of the facts gathered from the provided hosts. The +hostname for a system inside the cluster is often different from the hostname +that is resolveable from command-line or web clients, therefore these settings +cannot be validated automatically. -For some cloud providers the installer is able to gather metadata exposed in -the instance so reasonable defaults will be provided. +For some cloud providers, the installer is able to gather metadata exposed in +the instance, so reasonable defaults will be provided. -Plese confirm that they are correct before moving forward. +Please confirm that they are correct before moving forward. """ notes = """ @@ -365,7 +384,7 @@ Notes: * The public IP should be the externally accessible IP associated with the instance * The hostname should resolve to the internal IP from the instances themselves. - * The public hostname should resolve to the external ip from hosts outside of + * The public hostname should resolve to the external IP from hosts outside of the cloud. """ @@ -413,42 +432,41 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended return default_facts - def check_hosts_config(oo_cfg, unattended): click.clear() masters = [host for host in oo_cfg.deployment.hosts if host.is_master()] if len(masters) == 2: - click.echo("A minimum of 3 Masters are required for HA deployments.") + click.echo("A minimum of 3 masters are required for HA deployments.") sys.exit(1) if len(masters) > 1: master_lb = [host for host in oo_cfg.deployment.hosts if host.is_master_lb()] if len(master_lb) > 1: - click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.') + click.echo('ERROR: More than one master load balancer specified. Only one is allowed.') sys.exit(1) elif len(master_lb) == 1: if master_lb[0].is_master() or master_lb[0].is_node(): - click.echo('ERROR: The Master load balancer is configured as a master or node. ' \ + click.echo('ERROR: The master load balancer is configured as a master or node. ' 'Please correct this.') sys.exit(1) else: message = """ ERROR: No master load balancer specified in config. You must provide the FQDN -of a load balancer to balance the API (port 8443) on all Master hosts. 
+of a load balancer to balance the API (port 8443) on all master hosts. https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters """ click.echo(message) sys.exit(1) - dedicated_nodes = [host for host in oo_cfg.deployment.hosts \ - if host.is_node() and not host.is_master()] + dedicated_nodes = [host for host in oo_cfg.deployment.hosts + if host.is_node() and not host.is_master()] if len(dedicated_nodes) == 0: message = """ -WARNING: No dedicated Nodes specified. By default, colocated Masters have -their Nodes set to unschedulable. If you proceed all nodes will be labelled +WARNING: No dedicated nodes specified. By default, colocated masters have +their nodes set to unschedulable. If you proceed all nodes will be labelled as schedulable. """ if unattended: @@ -458,6 +476,7 @@ as schedulable. return + def get_variant_and_version(multi_master=False): message = "\nWhich variant would you like to install?\n\n" @@ -465,7 +484,7 @@ def get_variant_and_version(multi_master=False): combos = get_variant_version_combos() for (variant, version) in combos: message = "%s\n(%s) %s %s" % (message, i, variant.description, - version.name) + version.name) i = i + 1 message = "%s\n" % message @@ -477,12 +496,14 @@ def get_variant_and_version(multi_master=False): return product, version + def confirm_continue(message): if message: click.echo(message) click.confirm("Are you ready to continue?", default=False, abort=True) return + def error_if_missing_info(oo_cfg): missing_info = False if not oo_cfg.deployment.hosts: @@ -491,7 +512,7 @@ def error_if_missing_info(oo_cfg): 'command line or in the config file: %s' % oo_cfg.config_path) sys.exit(1) - if 'ansible_ssh_user' not in oo_cfg.settings: + if 'ansible_ssh_user' not in oo_cfg.deployment.variables: click.echo("Must specify ansible_ssh_user in configuration file.") sys.exit(1) @@ -522,6 +543,7 @@ def error_if_missing_info(oo_cfg): if missing_info: sys.exit(1) + def get_host_roles_set(oo_cfg): roles_set = set() for host in oo_cfg.deployment.hosts: @@ -530,13 +552,13 @@ def get_host_roles_set(oo_cfg): return roles_set + def get_proxy_hostnames_and_excludes(): message = """ -If a proxy is needed to reach HTTP and HTTPS traffic please enter the name below. -This proxy will be configured by default for all processes needing to reach systems outside -the cluster. +If a proxy is needed to reach HTTP and HTTPS traffic, please enter the name below. +This proxy will be configured by default for all processes that need to reach systems outside the cluster. -More advanced configuration is possible if using ansible directly: +More advanced configuration is possible if using Ansible directly: https://docs.openshift.com/enterprise/latest/install_config/http_proxies.html """ @@ -550,7 +572,7 @@ https://docs.openshift.com/enterprise/latest/install_config/http_proxies.html if http_proxy_hostname or https_proxy_hostname: message = """ -All hosts in your openshift inventory will automatically be added to the NO_PROXY value. +All hosts in your OpenShift inventory will automatically be added to the NO_PROXY value. Please provide any additional hosts to be added to NO_PROXY. (ENTER for none) """ proxy_excludes = click.prompt(message, default='') @@ -559,6 +581,7 @@ Please provide any additional hosts to be added to NO_PROXY. (ENTER for none) return http_proxy_hostname, https_proxy_hostname, proxy_excludes + def get_missing_info_from_user(oo_cfg): """ Prompts the user for any information missing from the given configuration. 
""" click.clear() @@ -574,10 +597,10 @@ Please confirm that following prerequisites have been met: repositories. * All systems have run docker-storage-setup (part of the Red Hat docker RPM). * All systems have working DNS that resolves not only from the perspective of - the installer but also from within the cluster. + the installer, but also from within the cluster. -When the process completes you will have a default configuration for Masters -and Nodes. For ongoing environment maintenance it's recommended that the +When the process completes you will have a default configuration for masters +and nodes. For ongoing environment maintenance it's recommended that the official Ansible playbooks be used. For more information on installation prerequisites please see: @@ -586,30 +609,34 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h confirm_continue(message) click.clear() - if not oo_cfg.settings.get('ansible_ssh_user', ''): - oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user() + if not oo_cfg.deployment.variables.get('ansible_ssh_user', False): + oo_cfg.deployment.variables['ansible_ssh_user'] = get_ansible_ssh_user() click.clear() if not oo_cfg.settings.get('variant', ''): variant, version = get_variant_and_version() oo_cfg.settings['variant'] = variant.name oo_cfg.settings['variant_version'] = version.name + oo_cfg.settings['variant_subtype'] = version.subtype click.clear() if not oo_cfg.deployment.hosts: oo_cfg.deployment.hosts, roles = collect_hosts(oo_cfg) + set_infra_nodes(oo_cfg.deployment.hosts) for role in roles: oo_cfg.deployment.roles[role] = Role(name=role, variables={}) click.clear() - if not 'master_routingconfig_subdomain' in oo_cfg.deployment.variables: - oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \ - get_master_routingconfig_subdomain() + if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables: + oo_cfg.deployment.variables['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain() click.clear() + current_version = parse_version( + oo_cfg.settings.get('variant_version', '0.0')) + min_version = parse_version('3.2') if not oo_cfg.settings.get('openshift_http_proxy', None) and \ - LooseVersion(oo_cfg.settings.get('variant_version', '0.0')) >= LooseVersion('3.2'): + current_version >= min_version: http_proxy, https_proxy, proxy_excludes = get_proxy_hostnames_and_excludes() oo_cfg.deployment.variables['proxy_http'] = http_proxy oo_cfg.deployment.variables['proxy_https'] = https_proxy @@ -627,10 +654,12 @@ def get_role_variable(oo_cfg, role_name, variable_name): except (StopIteration, KeyError): return None + def set_role_variable(oo_cfg, role_name, variable_name, variable_value): target_role = next(role for role in oo_cfg.deployment.roles if role.name is role_name) target_role[variable_name] = variable_value + def collect_new_nodes(oo_cfg): click.clear() click.echo('*** New Node Configuration ***') @@ -641,32 +670,27 @@ Add new nodes here new_nodes, _ = collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False) return new_nodes + def get_installed_hosts(hosts, callback_facts): installed_hosts = [] + uninstalled_hosts = [] + for host in [h for h in hosts if h.is_master() or h.is_node()]: + if host.connect_to in callback_facts.keys(): + if is_installed_host(host, callback_facts): + installed_hosts.append(host) + else: + uninstalled_hosts.append(host) + return installed_hosts, uninstalled_hosts - # count nativeha lb as an installed host - try: - first_master = next(host 
for host in hosts if host.is_master()) - lb_hostname = callback_facts[first_master.connect_to]['master'].get('cluster_hostname', '') - lb_host = \ - next(host for host in hosts if host.ip == callback_facts[lb_hostname]['common']['ip']) - - installed_hosts.append(lb_host) - except (KeyError, StopIteration): - pass - - for host in hosts: - if host.connect_to in callback_facts.keys() and is_installed_host(host, callback_facts): - installed_hosts.append(host) - return installed_hosts def is_installed_host(host, callback_facts): version_found = 'common' in callback_facts[host.connect_to].keys() and \ - callback_facts[host.connect_to]['common'].get('version', '') and \ - callback_facts[host.connect_to]['common'].get('version', '') != 'None' + callback_facts[host.connect_to]['common'].get('version', '') and \ + callback_facts[host.connect_to]['common'].get('version', '') != 'None' return version_found + # pylint: disable=too-many-branches # This pylint error will be corrected shortly in separate PR. def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): @@ -675,16 +699,16 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): hosts_to_run_on = list(oo_cfg.deployment.hosts) # Check if master or nodes already have something installed - installed_hosts = get_installed_hosts(oo_cfg.deployment.hosts, callback_facts) + installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts, callback_facts) if len(installed_hosts) > 0: click.echo('Installed environment detected.') # This check has to happen before we start removing hosts later in this method if not force: if not unattended: - click.echo('By default the installer only adds new nodes ' \ + click.echo('By default the installer only adds new nodes ' 'to an installed environment.') - response = click.prompt('Do you want to (1) only add additional nodes or ' \ - '(2) reinstall the existing hosts ' \ + response = click.prompt('Do you want to (1) only add additional nodes or ' + '(2) reinstall the existing hosts ' 'potentially erasing any custom changes?', type=int) # TODO: this should be reworked with error handling. @@ -698,29 +722,31 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): # present a message listing already installed hosts and remove hosts if needed for host in installed_hosts: if host.is_master(): - click.echo("{} is already an OpenShift Master".format(host)) + click.echo("{} is already an OpenShift master".format(host)) # Masters stay in the list, we need to run against them when adding # new nodes. elif host.is_node(): - click.echo("{} is already an OpenShift Node".format(host)) + click.echo("{} is already an OpenShift node".format(host)) # force is only used for reinstalls so we don't want to remove # anything. if not force: hosts_to_run_on.remove(host) # Handle the cases where we know about uninstalled systems - new_hosts = set(hosts_to_run_on) - set(installed_hosts) - if len(new_hosts) > 0: - for new_host in new_hosts: - click.echo("{} is currently uninstalled".format(new_host)) - + if len(uninstalled_hosts) > 0: + for uninstalled_host in uninstalled_hosts: + click.echo("{} is currently uninstalled".format(uninstalled_host)) # Fall through - click.echo('Adding additional nodes...') + click.echo('\nUninstalled hosts have been detected in your environment. ' + 'Please make sure your environment was installed successfully ' + 'before adding new nodes. 
If you want a fresh install, use ' + '`atomic-openshift-installer install --force`') + sys.exit(1) else: if unattended: if not force: - click.echo('Installed environment detected and no additional ' \ - 'nodes specified: aborting. If you want a fresh install, use ' \ + click.echo('Installed environment detected and no additional ' + 'nodes specified: aborting. If you want a fresh install, use ' '`atomic-openshift-installer install --force`') sys.exit(1) else: @@ -734,24 +760,35 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): click.echo('Gathering information from hosts...') callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose) if error or callback_facts is None: - click.echo("There was a problem fetching the required information. See " \ + click.echo("There was a problem fetching the required information. See " "{} for details.".format(oo_cfg.settings['ansible_log_path'])) sys.exit(1) else: - pass # proceeding as normal should do a clean install + pass # proceeding as normal should do a clean install return hosts_to_run_on, callback_facts +def set_infra_nodes(hosts): + if all(host.is_master() for host in hosts): + infra_list = hosts + else: + nodes_list = [host for host in hosts if host.is_node()] + infra_list = nodes_list[:2] + + for host in infra_list: + host.node_labels = "{'region': 'infra'}" + + @click.group() @click.pass_context @click.option('--unattended', '-u', is_flag=True, default=False) @click.option('--configuration', '-c', - type=click.Path(file_okay=True, - dir_okay=False, - writable=True, - readable=True), - default=None) + type=click.Path(file_okay=True, + dir_okay=False, + writable=True, + readable=True), + default=None) @click.option('--ansible-playbook-directory', '-a', type=click.Path(exists=True, @@ -762,23 +799,27 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): default=DEFAULT_PLAYBOOK_DIR, envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY') @click.option('--ansible-config', - type=click.Path(file_okay=True, - dir_okay=False, - writable=True, - readable=True), - default=None) + type=click.Path(file_okay=True, + dir_okay=False, + writable=True, + readable=True), + default=None) @click.option('--ansible-log-path', - type=click.Path(file_okay=True, - dir_okay=False, - writable=True, - readable=True), - default="/tmp/ansible.log") + type=click.Path(file_okay=True, + dir_okay=False, + writable=True, + readable=True), + default="/tmp/ansible.log") @click.option('-v', '--verbose', - is_flag=True, default=False) -#pylint: disable=too-many-arguments -#pylint: disable=line-too-long + is_flag=True, default=False) +@click.option('-d', '--debug', + help="Enable installer debugging (/tmp/installer.log)", + is_flag=True, default=False) +@click.help_option('--help', '-h') +# pylint: disable=too-many-arguments +# pylint: disable=line-too-long # Main CLI entrypoint, not much we can do about too many arguments. -def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose): +def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose, debug): """ atomic-openshift-installer makes the process for installing OSE or AEP easier by interactively gathering the data needed to run on each host. 
@@ -786,6 +827,14 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html """ + if debug: + # DEFAULT log level threshold is set to CRITICAL (the + # highest), anything below that (we only use debug/warning + # presently) is not logged. If '-d' is given though, we'll + # lower the threshold to debug (almost everything gets through) + installer_log.setLevel(logging.DEBUG) + installer_log.debug("Quick Installer debugging initialized") + ctx.obj = {} ctx.obj['unattended'] = unattended ctx.obj['configuration'] = configuration @@ -813,7 +862,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf if ctx.obj['ansible_config']: oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config'] elif 'ansible_config' not in oo_cfg.settings and \ - os.path.exists(DEFAULT_ANSIBLE_CONFIG): + os.path.exists(DEFAULT_ANSIBLE_CONFIG): # If we're installed by RPM this file should exist and we can use it as our default: oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG @@ -829,46 +878,34 @@ def uninstall(ctx): oo_cfg = ctx.obj['oo_cfg'] verbose = ctx.obj['verbose'] - if len(oo_cfg.deployment.hosts) == 0: + if hasattr(oo_cfg, 'deployment'): + hosts = oo_cfg.deployment.hosts + elif hasattr(oo_cfg, 'hosts'): + hosts = oo_cfg.hosts + else: click.echo("No hosts defined in: %s" % oo_cfg.config_path) sys.exit(1) click.echo("OpenShift will be uninstalled from the following hosts:\n") if not ctx.obj['unattended']: # Prompt interactively to confirm: - for host in oo_cfg.deployment.hosts: + for host in hosts: click.echo(" * %s" % host.connect_to) - proceed = click.confirm("\nDo you wish to proceed?") + proceed = click.confirm("\nDo you want to proceed?") if not proceed: click.echo("Uninstall cancelled.") sys.exit(0) - openshift_ansible.run_uninstall_playbook(verbose) + openshift_ansible.run_uninstall_playbook(hosts, verbose) @click.command() @click.option('--latest-minor', '-l', is_flag=True, default=False) @click.option('--next-major', '-n', is_flag=True, default=False) @click.pass_context -#pylint: disable=bad-builtin,too-many-statements +# pylint: disable=too-many-statements def upgrade(ctx, latest_minor, next_major): oo_cfg = ctx.obj['oo_cfg'] - verbose = ctx.obj['verbose'] - - # major/minor fields are optional, as we don't always support minor/major - # upgrade for what you're currently running. - upgrade_mappings = { - '3.1':{ - 'major_playbook':'v3_2/upgrade.yml', - 'major_version' :'3.2', - }, - '3.2':{ - 'minor_playbook':'v3_2/upgrade.yml', -# Uncomment these when we're ready to support 3.3. -# 'major_version' :'3.3', -# 'major_playbook':'v3_1_to_v3_2/upgrade.yml', - }, - } if len(oo_cfg.deployment.hosts) == 0: click.echo("No hosts defined in: %s" % oo_cfg.config_path) @@ -880,7 +917,7 @@ def upgrade(ctx, latest_minor, next_major): sys.exit(0) old_version = oo_cfg.settings['variant_version'] - mapping = upgrade_mappings.get(old_version) + mapping = UPGRADE_MAPPINGS.get(old_version) message = """ This tool will help you upgrade your existing OpenShift installation. @@ -909,41 +946,42 @@ def upgrade(ctx, latest_minor, next_major): if next_major: if 'major_playbook' not in mapping: - click.echo("No major upgrade supported for %s %s with this version "\ + click.echo("No major upgrade supported for %s %s with this version " "of atomic-openshift-utils." 
% (variant, old_version)) sys.exit(0) playbook = mapping['major_playbook'] new_version = mapping['major_version'] # Update config to reflect the version we're targetting, we'll write - # to disk once ansible completes successfully, not before. + # to disk once Ansible completes successfully, not before. oo_cfg.settings['variant_version'] = new_version if oo_cfg.settings['variant'] == 'enterprise': oo_cfg.settings['variant'] = 'openshift-enterprise' if latest_minor: if 'minor_playbook' not in mapping: - click.echo("No minor upgrade supported for %s %s with this version "\ + click.echo("No minor upgrade supported for %s %s with this version " "of atomic-openshift-utils." % (variant, old_version)) sys.exit(0) playbook = mapping['minor_playbook'] new_version = old_version - click.echo("Openshift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % ( + click.echo("OpenShift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % ( variant, old_version, oo_cfg.settings['variant'], new_version)) for host in oo_cfg.deployment.hosts: click.echo(" * %s" % host.connect_to) if not ctx.obj['unattended']: # Prompt interactively to confirm: - proceed = click.confirm("\nDo you wish to proceed?") - if not proceed: + if not click.confirm("\nDo you want to proceed?"): click.echo("Upgrade cancelled.") sys.exit(0) - retcode = openshift_ansible.run_upgrade_playbook(playbook, verbose) + retcode = openshift_ansible.run_upgrade_playbook(oo_cfg.deployment.hosts, + playbook, + ctx.obj['verbose']) if retcode > 0: click.echo("Errors encountered during upgrade, please check %s." % - oo_cfg.settings['ansible_log_path']) + oo_cfg.settings['ansible_log_path']) else: oo_cfg.save_to_disk() click.echo("Upgrade completed! Rebooting all hosts is recommended.") @@ -952,7 +990,7 @@ def upgrade(ctx, latest_minor, next_major): @click.command() @click.option('--force', '-f', is_flag=True, default=False) @click.option('--gen-inventory', is_flag=True, default=False, - help="Generate an ansible inventory file and exit.") + help="Generate an Ansible inventory file and exit.") @click.pass_context def install(ctx, force, gen_inventory): oo_cfg = ctx.obj['oo_cfg'] @@ -968,17 +1006,16 @@ def install(ctx, force, gen_inventory): print_installation_summary(oo_cfg.deployment.hosts, oo_cfg.settings.get('variant_version', None)) click.echo('Gathering information from hosts...') callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, - verbose) + verbose) if error or callback_facts is None: - click.echo("There was a problem fetching the required information. " \ + click.echo("There was a problem fetching the required information. 
" "Please see {} for details.".format(oo_cfg.settings['ansible_log_path'])) sys.exit(1) hosts_to_run_on, callback_facts = get_hosts_to_run_on( oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose) - # We already verified this is not the case for unattended installs, so this can # only trigger for live CLI users: # TODO: if there are *new* nodes and this is a live install, we may need the user @@ -990,12 +1027,12 @@ def install(ctx, force, gen_inventory): # Write quick installer config file to disk: oo_cfg.save_to_disk() - # Write ansible inventory file to disk: + # Write Ansible inventory file to disk: inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on) click.echo() click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path) - click.echo("Wrote ansible inventory: %s" % inventory_file) + click.echo("Wrote Ansible inventory: %s" % inventory_file) click.echo() if gen_inventory: @@ -1014,7 +1051,7 @@ If changes are needed please edit the config file above and re-run. if error: # The bootstrap script will print out the log location. message = """ -An error was detected. After resolving the problem please relaunch the +An error was detected. After resolving the problem please relaunch the installation process. """ click.echo(message) @@ -1024,7 +1061,7 @@ installation process. The installation was successful! If this is your first time installing please take a look at the Administrator -Guide for advanced options related to routing, storage, authentication and much +Guide for advanced options related to routing, storage, authentication, and more: http://docs.openshift.com/enterprise/latest/admin_guide/overview.html diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index fc06a0c4a..393b36f6f 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -2,9 +2,13 @@ import os import sys +import logging import yaml from pkg_resources import resource_filename + +installer_log = logging.getLogger('installer') + CONFIG_PERSIST_SETTINGS = [ 'ansible_ssh_user', 'ansible_callback_facts_yaml', @@ -14,19 +18,29 @@ CONFIG_PERSIST_SETTINGS = [ 'deployment', 'version', 'variant', + 'variant_subtype', 'variant_version', - ] +] -DEPLOYMENT_PERSIST_SETTINGS = [ - 'master_routingconfig_subdomain', - 'proxy_http', - 'proxy_https', - 'proxy_exclude_hosts', +DEPLOYMENT_VARIABLES_BLACKLIST = [ + 'hosts', + 'roles', ] DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname'] PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname'] + +def print_read_config_error(error, path='the configuration file'): + message = """ +Error loading config. {}. + +See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file +for information on creating a configuration file or delete {} and re-run the installer. 
+""" + print message.format(error, path) + + class OOConfigFileError(Exception): """The provided config file path can't be read/written """ @@ -56,6 +70,8 @@ class Host(object): # allowable roles: master, node, etcd, storage, master_lb, new self.roles = kwargs.get('roles', []) + self.other_variables = kwargs.get('other_variables', {}) + if self.connect_to is None: raise OOConfigInvalidHostError( "You must specify either an ip or hostname as 'connect_to'") @@ -71,7 +87,8 @@ class Host(object): d = {} for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', 'connect_to', - 'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels']: + 'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels', + 'other_variables']: # If the property is defined (not None or False), export it: if getattr(self, prop): d[prop] = getattr(self, prop) @@ -89,7 +106,6 @@ class Host(object): def is_storage(self): return 'storage' in self.roles - def is_etcd_member(self, all_hosts): """ Will this host be a member of a standalone etcd cluster. """ if not self.is_master(): @@ -163,35 +179,60 @@ class OOConfig(object): self._read_config() self._set_defaults() - + # pylint: disable=too-many-branches + # Lots of different checks ran in a single method, could + # use a little refactoring-love some time def _read_config(self): + installer_log.debug("Attempting to read the OO Config") try: + installer_log.debug("Attempting to see if the provided config file exists: %s", self.config_path) if os.path.exists(self.config_path): + installer_log.debug("We think the config file exists: %s", self.config_path) with open(self.config_path, 'r') as cfgfile: loaded_config = yaml.safe_load(cfgfile.read()) - # Use the presence of a Description as an indicator this is - # a legacy config file: - if 'Description' in self.settings: - self._upgrade_legacy_config() + if 'version' not in loaded_config: + print_read_config_error('Legacy configuration file found', self.config_path) + sys.exit(0) + + if loaded_config.get('version', '') == 'v1': + loaded_config = self._upgrade_v1_config(loaded_config) try: host_list = loaded_config['deployment']['hosts'] role_list = loaded_config['deployment']['roles'] except KeyError as e: - print "Error loading config, no such key: {}".format(e) + print_read_config_error("No such key: {}".format(e), self.config_path) + print "Error loading config, required key missing: {}".format(e) sys.exit(0) for setting in CONFIG_PERSIST_SETTINGS: + persisted_value = loaded_config.get(setting) + if persisted_value is not None: + self.settings[setting] = str(persisted_value) + + # We've loaded any persisted configs, let's verify any + # paths which are required for a correct and complete + # install + + # - ansible_callback_facts_yaml - Settings from a + # pervious run. If the file doesn't exist then we + # will just warn about it for now and recollect the + # facts. 
+ if self.settings.get('ansible_callback_facts_yaml', None) is not None: + if not os.path.exists(self.settings['ansible_callback_facts_yaml']): + # Cached callback facts file does not exist + installer_log.warning("The specified 'ansible_callback_facts_yaml'" + "file does not exist (%s)", + self.settings['ansible_callback_facts_yaml']) + installer_log.debug("Remote system facts will be collected again later") + self.settings.pop('ansible_callback_facts_yaml') + + for setting in loaded_config['deployment']: try: - self.settings[setting] = str(loaded_config[setting]) - except KeyError: - continue - - for setting in DEPLOYMENT_PERSIST_SETTINGS: - try: - self.deployment.variables[setting] = \ - str(loaded_config['deployment'][setting]) + if setting not in DEPLOYMENT_VARIABLES_BLACKLIST: + self.deployment.variables[setting] = \ + str(loaded_config['deployment'][setting]) except KeyError: continue @@ -203,47 +244,67 @@ class OOConfig(object): for name, variables in role_list.iteritems(): self.deployment.roles.update({name: Role(name, variables)}) - except IOError, ferr: raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename, ferr.strerror)) except yaml.scanner.ScannerError: raise OOConfigFileError( 'Config file "{}" is not a valid YAML document'.format(self.config_path)) + installer_log.debug("Parsed the config file") + + def _upgrade_v1_config(self, config): + new_config_data = {} + new_config_data['deployment'] = {} + new_config_data['deployment']['hosts'] = [] + new_config_data['deployment']['roles'] = {} + new_config_data['deployment']['variables'] = {} + + role_list = {} - def _upgrade_legacy_config(self): - new_hosts = [] - remove_settings = ['validated_facts', 'Description', 'Name', - 'Subscription', 'Vendor', 'Version', 'masters', 'nodes'] - - if 'validated_facts' in self.settings: - for key, value in self.settings['validated_facts'].iteritems(): - value['connect_to'] = key - if 'masters' in self.settings and key in self.settings['masters']: - value['master'] = True - if 'nodes' in self.settings and key in self.settings['nodes']: - value['node'] = True - new_hosts.append(value) - self.settings['hosts'] = new_hosts - - for s in remove_settings: - if s in self.settings: - del self.settings[s] - - # A legacy config implies openshift-enterprise 3.0: - self.settings['variant'] = 'openshift-enterprise' - self.settings['variant_version'] = '3.0' - - def _upgrade_v1_config(self): - #TODO write code to upgrade old config - return + if config.get('ansible_ssh_user', False): + new_config_data['deployment']['ansible_ssh_user'] = config['ansible_ssh_user'] + + if config.get('variant', False): + new_config_data['variant'] = config['variant'] + + if config.get('variant_version', False): + new_config_data['variant_version'] = config['variant_version'] + + for host in config['hosts']: + host_props = {} + host_props['roles'] = [] + host_props['connect_to'] = host['connect_to'] + + for prop in ['ip', 'public_ip', 'hostname', 'public_hostname', 'containerized', 'preconfigured']: + host_props[prop] = host.get(prop, None) + + for role in ['master', 'node', 'master_lb', 'storage', 'etcd']: + if host.get(role, False): + host_props['roles'].append(role) + role_list[role] = '' + + new_config_data['deployment']['hosts'].append(host_props) + + new_config_data['deployment']['roles'] = role_list + + return new_config_data def _set_defaults(self): + installer_log.debug("Setting defaults, current OOConfig settings: %s", self.settings) if 'ansible_inventory_directory' not in self.settings: 
self.settings['ansible_inventory_directory'] = self._default_ansible_inv_dir() + if not os.path.exists(self.settings['ansible_inventory_directory']): + installer_log.debug("'ansible_inventory_directory' does not exist, " + "creating it now (%s)", + self.settings['ansible_inventory_directory']) os.makedirs(self.settings['ansible_inventory_directory']) + else: + installer_log.debug("We think this 'ansible_inventory_directory' " + "is OK: %s", + self.settings['ansible_inventory_directory']) + if 'ansible_plugins_directory' not in self.settings: self.settings['ansible_plugins_directory'] = \ resource_filename(__name__, 'ansible_plugins') @@ -251,8 +312,14 @@ class OOConfig(object): self.settings['version'] = 'v2' if 'ansible_callback_facts_yaml' not in self.settings: + installer_log.debug("No 'ansible_callback_facts_yaml' in self.settings") self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \ self.settings['ansible_inventory_directory'] + installer_log.debug("Value: %s", self.settings['ansible_callback_facts_yaml']) + else: + installer_log.debug("'ansible_callback_facts_yaml' already set " + "in self.settings: %s", + self.settings['ansible_callback_facts_yaml']) if 'ansible_ssh_user' not in self.settings: self.settings['ansible_ssh_user'] = '' @@ -260,11 +327,17 @@ class OOConfig(object): self.settings['ansible_inventory_path'] = \ '{}/hosts'.format(os.path.dirname(self.config_path)) + # pylint: disable=consider-iterating-dictionary + # Disabled because we shouldn't alter the container we're + # iterating over + # # clean up any empty sets for setting in self.settings.keys(): if not self.settings[setting]: self.settings.pop(setting) + installer_log.debug("Updated OOConfig settings: %s", self.settings) + def _default_ansible_inv_dir(self): return os.path.normpath( os.path.dirname(self.config_path) + "/.ansible") @@ -303,21 +376,20 @@ class OOConfig(object): if setting in self.settings and self.settings[setting]: p_settings[setting] = self.settings[setting] - p_settings['deployment'] = {} p_settings['deployment']['hosts'] = [] p_settings['deployment']['roles'] = {} - for setting in DEPLOYMENT_PERSIST_SETTINGS: - if setting in self.deployment.variables: - p_settings['deployment'][setting] = self.deployment.variables[setting] - for host in self.deployment.hosts: p_settings['deployment']['hosts'].append(host.to_dict()) for name, role in self.deployment.roles.iteritems(): p_settings['deployment']['roles'][name] = role.variables + for setting in self.deployment.variables: + if setting not in DEPLOYMENT_VARIABLES_BLACKLIST: + p_settings['deployment'][setting] = self.deployment.variables[setting] + try: p_settings['variant'] = self.settings['variant'] p_settings['variant_version'] = self.settings['variant_version'] @@ -328,7 +400,6 @@ class OOConfig(object): print "Error persisting settings: {}".format(e) sys.exit(0) - return p_settings def yaml(self): diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 352955026..75d26c10a 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -1,124 +1,90 @@ -# TODO: Temporarily disabled due to importing old code into openshift-ansible -# repo. We will work on these over time. 
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned import socket import subprocess import sys import os +import logging import yaml from ooinstall.variants import find_variant +installer_log = logging.getLogger('installer') + CFG = None ROLES_TO_GROUPS_MAP = { 'master': 'masters', 'node': 'nodes', - 'storage': 'nfs' + 'etcd': 'etcd', + 'storage': 'nfs', + 'master_lb': 'lb' } VARIABLES_MAP = { 'ansible_ssh_user': 'ansible_ssh_user', - 'ansible_config': 'ansible_config', - 'ansible_log_path': 'ansible_log_path', 'deployment_type': 'deployment_type', - 'master_routingconfig_subdomain':'openshift_master_default_subdomain', - 'proxy_http':'openshift_http_proxy', + 'variant_subtype': 'deployment_subtype', + 'master_routingconfig_subdomain': 'openshift_master_default_subdomain', + 'proxy_http': 'openshift_http_proxy', 'proxy_https': 'openshift_https_proxy', 'proxy_exclude_hosts': 'openshift_no_proxy', } + def set_config(cfg): global CFG CFG = cfg + def generate_inventory(hosts): global CFG + masters = [host for host in hosts if host.is_master()] - nodes = [host for host in hosts if host.is_node()] - new_nodes = [host for host in hosts if host.is_node() and host.new_host] - proxy = determine_proxy_configuration(hosts) - storage = determine_storage_configuration(hosts) multiple_masters = len(masters) > 1 + + new_nodes = [host for host in hosts if host.is_node() and host.new_host] scaleup = len(new_nodes) > 0 + lb = determine_lb_configuration(hosts) + base_inventory_path = CFG.settings['ansible_inventory_path'] base_inventory = open(base_inventory_path, 'w') - write_inventory_children(base_inventory, multiple_masters, proxy, scaleup) - - write_inventory_vars(base_inventory, multiple_masters, proxy) - - # Find the correct deployment type for ansible: - ver = find_variant(CFG.settings['variant'], - version=CFG.settings.get('variant_version', None))[1] - base_inventory.write('deployment_type={}\n'.format(ver.ansible_key)) - - if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ: - base_inventory.write('openshift_docker_additional_registries={}\n' - .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES'])) - if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ: - base_inventory.write('openshift_docker_insecure_registries={}\n' - .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES'])) - if 'OO_INSTALL_PUDDLE_REPO' in os.environ: - # We have to double the '{' here for literals - base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', " - "'name': 'ose-devel', " - "'baseurl': '{}', " - "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO'])) - - base_inventory.write('\n[masters]\n') - for master in masters: - write_host(master, base_inventory) - - if len(masters) > 1: - base_inventory.write('\n[etcd]\n') - for master in masters: - write_host(master, base_inventory) + write_inventory_children(base_inventory, scaleup) - base_inventory.write('\n[nodes]\n') - - for node in nodes: - # Let the fact defaults decide if we're not a master: - schedulable = None - - # If the node is also a master, we must explicitly set schedulablity: - if node.is_master(): - schedulable = node.is_schedulable_node(hosts) - write_host(node, base_inventory, schedulable) - - if not getattr(proxy, 'preconfigured', True): - base_inventory.write('\n[lb]\n') - write_host(proxy, base_inventory) + write_inventory_vars(base_inventory, multiple_masters, lb) + # write_inventory_hosts + for role in CFG.deployment.roles: + # write group block 
+ group = ROLES_TO_GROUPS_MAP.get(role, role) + base_inventory.write("\n[{}]\n".format(group)) + # write each host + group_hosts = [host for host in hosts if role in host.roles] + for host in group_hosts: + schedulable = host.is_schedulable_node(hosts) + write_host(host, role, base_inventory, schedulable) if scaleup: base_inventory.write('\n[new_nodes]\n') for node in new_nodes: - write_host(node, base_inventory) - - if storage: - base_inventory.write('\n[nfs]\n') - write_host(storage, base_inventory) + write_host(node, 'new_nodes', base_inventory) base_inventory.close() return base_inventory_path -def determine_proxy_configuration(hosts): - proxy = next((host for host in hosts if host.is_master_lb()), None) - if proxy: - if proxy.hostname == None: - proxy.hostname = proxy.connect_to - proxy.public_hostname = proxy.connect_to - return proxy +def determine_lb_configuration(hosts): + lb = next((host for host in hosts if host.is_master_lb()), None) + if lb: + if lb.hostname is None: + lb.hostname = lb.connect_to + lb.public_hostname = lb.connect_to -def determine_storage_configuration(hosts): - storage = next((host for host in hosts if host.is_storage()), None) + return lb - return storage -def write_inventory_children(base_inventory, multiple_masters, proxy, scaleup): +def write_inventory_children(base_inventory, scaleup): global CFG base_inventory.write('\n[OSEv3:children]\n') @@ -128,12 +94,10 @@ def write_inventory_children(base_inventory, multiple_masters, proxy, scaleup): if scaleup: base_inventory.write('new_nodes\n') - if multiple_masters: - base_inventory.write('etcd\n') - if not getattr(proxy, 'preconfigured', True): - base_inventory.write('lb\n') -def write_inventory_vars(base_inventory, multiple_masters, proxy): + +# pylint: disable=too-many-branches +def write_inventory_vars(base_inventory, multiple_masters, lb): global CFG base_inventory.write('\n[OSEv3:vars]\n') @@ -147,21 +111,41 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy): if value: base_inventory.write('{}={}\n'.format(inventory_var, value)) - if CFG.settings['ansible_ssh_user'] != 'root': + if CFG.deployment.variables['ansible_ssh_user'] != 'root': base_inventory.write('ansible_become=yes\n') - if multiple_masters and proxy is not None: + if multiple_masters and lb is not None: base_inventory.write('openshift_master_cluster_method=native\n') - base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname)) + base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname)) base_inventory.write( - "openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname)) + "openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname)) if CFG.settings.get('variant_version', None) == '3.1': - #base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version'))) + # base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version'))) base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6')) write_proxy_settings(base_inventory) + # Find the correct deployment type for ansible: + ver = find_variant(CFG.settings['variant'], + version=CFG.settings.get('variant_version', None))[1] + base_inventory.write('deployment_type={}\n'.format(ver.ansible_key)) + if getattr(ver, 'variant_subtype', False): + base_inventory.write('deployment_subtype={}\n'.format(ver.deployment_subtype)) + + if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ: + 
base_inventory.write('openshift_docker_additional_registries={}\n'.format( + os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES'])) + if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ: + base_inventory.write('openshift_docker_insecure_registries={}\n'.format( + os.environ['OO_INSTALL_INSECURE_REGISTRIES'])) + if 'OO_INSTALL_PUDDLE_REPO' in os.environ: + # We have to double the '{' here for literals + base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', " + "'name': 'ose-devel', " + "'baseurl': '{}', " + "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO'])) + for name, role_obj in CFG.deployment.roles.iteritems(): if role_obj.variables: group_name = ROLES_TO_GROUPS_MAP.get(name, name) @@ -176,24 +160,28 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy): def write_proxy_settings(base_inventory): try: base_inventory.write("openshift_http_proxy={}\n".format( - CFG.settings['openshift_http_proxy'])) + CFG.settings['openshift_http_proxy'])) except KeyError: pass try: base_inventory.write("openshift_https_proxy={}\n".format( - CFG.settings['openshift_https_proxy'])) + CFG.settings['openshift_https_proxy'])) except KeyError: pass try: base_inventory.write("openshift_no_proxy={}\n".format( - CFG.settings['openshift_no_proxy'])) + CFG.settings['openshift_no_proxy'])) except KeyError: pass -def write_host(host, inventory, schedulable=None): +# pylint: disable=too-many-branches +def write_host(host, role, inventory, schedulable=None): global CFG + if host.preconfigured: + return + facts = '' if host.ip: facts += ' openshift_ip={}'.format(host.ip) @@ -205,15 +193,19 @@ def write_host(host, inventory, schedulable=None): facts += ' openshift_public_hostname={}'.format(host.public_hostname) if host.containerized: facts += ' containerized={}'.format(host.containerized) + if host.other_variables: + for variable, value in host.other_variables.iteritems(): + facts += " {}={}".format(variable, value) + if host.node_labels: + if role == 'node': + facts += ' openshift_node_labels="{}"'.format(host.node_labels) # Distinguish between three states, no schedulability specified (use default), # explicitly set to True, or explicitly set to False: - if schedulable is None: + if role != 'node' or schedulable is None: pass - elif schedulable: - facts += ' openshift_schedulable=True' - elif not schedulable: - facts += ' openshift_schedulable=False' + else: + facts += " openshift_schedulable={}".format(schedulable) installer_host = socket.gethostname() if installer_host in [host.connect_to, host.hostname, host.public_hostname]: @@ -232,17 +224,21 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False): """ Retrieves system facts from the remote systems. 
""" + installer_log.debug("Inside load_system_facts") FNULL = open(os.devnull, 'w') args = ['ansible-playbook', '-v'] if verbose \ else ['ansible-playbook'] args.extend([ '--inventory-file={}'.format(inventory_file), os_facts_path]) + installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args)) status = subprocess.call(args, env=env_vars, stdout=FNULL) - if not status == 0: + if status != 0: + installer_log.debug("Exit status from subprocess was not 0") return [], 1 with open(CFG.settings['ansible_callback_facts_yaml'], 'r') as callback_facts_file: + installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml']) try: callback_facts = yaml.safe_load(callback_facts_file) except yaml.YAMLError, exc: @@ -255,6 +251,7 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False): def default_facts(hosts, verbose=False): global CFG + installer_log.debug("Current global CFG vars here: %s", CFG) inventory_file = generate_inventory(hosts) os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory) @@ -266,6 +263,9 @@ def default_facts(hosts, verbose=False): facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + + installer_log.debug("facts_env: %s", facts_env) + installer_log.debug("Going to 'load_system_facts' next") return load_system_facts(inventory_file, os_facts_path, facts_env, verbose) @@ -294,10 +294,10 @@ def run_ansible(playbook, inventory, env_vars, verbose=False): return subprocess.call(args, env=env_vars) -def run_uninstall_playbook(verbose=False): +def run_uninstall_playbook(hosts, verbose=False): playbook = os.path.join(CFG.settings['ansible_playbook_directory'], - 'playbooks/adhoc/uninstall.yml') - inventory_file = generate_inventory(CFG.hosts) + 'playbooks/adhoc/uninstall.yml') + inventory_file = generate_inventory(hosts) facts_env = os.environ.copy() if 'ansible_log_path' in CFG.settings: facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] @@ -306,12 +306,12 @@ def run_uninstall_playbook(verbose=False): return run_ansible(playbook, inventory_file, facts_env, verbose) -def run_upgrade_playbook(playbook, verbose=False): +def run_upgrade_playbook(hosts, playbook, verbose=False): playbook = os.path.join(CFG.settings['ansible_playbook_directory'], - 'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook)) + 'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook)) # TODO: Upgrade inventory for upgrade? - inventory_file = generate_inventory(CFG.hosts) + inventory_file = generate_inventory(hosts) facts_env = os.environ.copy() if 'ansible_log_path' in CFG.settings: facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py index 8889e42e6..6993794fe 100644 --- a/utils/src/ooinstall/variants.py +++ b/utils/src/ooinstall/variants.py @@ -11,12 +11,16 @@ to be specified by the user, and to point the generic variants to the latest version. """ +import logging +installer_log = logging.getLogger('installer') + class Version(object): - def __init__(self, name, ansible_key): + def __init__(self, name, ansible_key, subtype=''): self.name = name # i.e. 
3.0, 3.1 self.ansible_key = ansible_key + self.subtype = subtype class Variant(object): @@ -34,30 +38,36 @@ class Variant(object): # WARNING: Keep the versions ordered, most recent first: -OSE = Variant('openshift-enterprise', 'OpenShift Enterprise', - [ - Version('3.2', 'openshift-enterprise'), - Version('3.1', 'openshift-enterprise'), - Version('3.0', 'enterprise') - ] +OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', + [ + Version('3.3', 'openshift-enterprise'), + ] ) -AEP = Variant('atomic-enterprise', 'Atomic Enterprise Platform', - [ - Version('3.2', 'atomic-enterprise'), - Version('3.1', 'atomic-enterprise') - ] +REG = Variant('openshift-enterprise', 'Registry', + [ + Version('3.3', 'openshift-enterprise', 'registry'), + ] ) origin = Variant('origin', 'OpenShift Origin', - [ - Version('1.2', 'origin'), - ] + [ + Version('1.2', 'origin'), + ] +) + +LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', + [ + Version('3.2', 'openshift-enterprise'), + Version('3.1', 'openshift-enterprise'), + Version('3.0', 'openshift-enterprise'), + ] ) # Ordered list of variants we can install, first is the default. -SUPPORTED_VARIANTS = (OSE, AEP, origin) -DISPLAY_VARIANTS = (OSE, AEP) +SUPPORTED_VARIANTS = (OSE, REG, origin, LEGACY) +DISPLAY_VARIANTS = (OSE, REG,) + def find_variant(name, version=None): """ @@ -76,6 +86,7 @@ def find_variant(name, version=None): return (None, None) + def get_variant_version_combos(): combos = [] for variant in DISPLAY_VARIANTS: diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt new file mode 100644 index 000000000..f2216a177 --- /dev/null +++ b/utils/test-requirements.txt @@ -0,0 +1,11 @@ +enum +configparser +pylint +pep8 +nose +coverage +mock +flake8 +PyYAML +click +backports.functools_lru_cache diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 13973f22f..6d9d443ff 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -101,8 +101,9 @@ MOCK_FACTS_QUICKHA = { # Missing connect_to on some hosts: BAD_CONFIG = """ variant: %s -ansible_ssh_user: root +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: 10.0.0.1 ip: 10.0.0.1 @@ -132,8 +133,9 @@ deployment: QUICKHA_CONFIG = """ variant: %s -ansible_ssh_user: root +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: 10.0.0.1 ip: 10.0.0.1 @@ -189,8 +191,9 @@ deployment: QUICKHA_2_MASTER_CONFIG = """ variant: %s -ansible_ssh_user: root +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: 10.0.0.1 ip: 10.0.0.1 @@ -238,8 +241,9 @@ deployment: QUICKHA_CONFIG_REUSED_LB = """ variant: %s -ansible_ssh_user: root +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: 10.0.0.1 ip: 10.0.0.1 @@ -281,8 +285,9 @@ deployment: QUICKHA_CONFIG_NO_LB = """ variant: %s -ansible_ssh_user: root +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: 10.0.0.1 ip: 10.0.0.1 @@ -323,8 +328,9 @@ deployment: QUICKHA_CONFIG_PRECONFIGURED_LB = """ variant: %s -ansible_ssh_user: root +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: 10.0.0.1 ip: 10.0.0.1 @@ -557,7 +563,7 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals('openshift-enterprise', written_config['variant']) # We didn't specify a version so the latest should have been assumed, # and written to disk: - self.assertEquals('3.2', written_config['variant_version']) + self.assertEquals('3.3', written_config['variant_version']) # Make sure the correct 
value was passed to ansible: inventory = ConfigParser.ConfigParser(allow_no_value=True) @@ -573,7 +579,7 @@ class UnattendedCliTests(OOCliFixture): run_playbook_mock.return_value = 0 config = SAMPLE_CONFIG % 'openshift-enterprise' - config = '%s\n%s' % (config, 'variant_version: 3.0') + config = '%s\n%s' % (config, 'variant_version: 3.3') config_file = self.write_config(os.path.join(self.work_dir, 'ooinstall.conf'), config) @@ -586,11 +592,11 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals('openshift-enterprise', written_config['variant']) # Make sure our older version was preserved: # and written to disk: - self.assertEquals('3.0', written_config['variant_version']) + self.assertEquals('3.3', written_config['variant_version']) inventory = ConfigParser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, 'hosts')) - self.assertEquals('enterprise', + self.assertEquals('openshift-enterprise', inventory.get('OSEv3:vars', 'deployment_type')) @patch('ooinstall.openshift_ansible.run_ansible') @@ -722,7 +728,7 @@ class UnattendedCliTests(OOCliFixture): # This is an invalid config: self.assert_result(result, 1) - self.assertTrue("A minimum of 3 Masters are required" in result.output) + self.assertTrue("A minimum of 3 masters are required" in result.output) #unattended with three masters, one node, but no load balancer specified: @patch('ooinstall.openshift_ansible.run_main_playbook') @@ -815,9 +821,9 @@ class AttendedCliTests(OOCliFixture): self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False') self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2', - 'openshift_schedulable') + 'openshift_schedulable=True') self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3', - 'openshift_schedulable') + 'openshift_schedulable=True') # interactive with config file and some installed some uninstalled hosts @patch('ooinstall.openshift_ansible.run_main_playbook') @@ -939,7 +945,7 @@ class AttendedCliTests(OOCliFixture): self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.3', 'openshift_schedulable=False') self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.4', - 'openshift_schedulable') + 'openshift_schedulable=True') self.assertTrue(inventory.has_section('etcd')) self.assertEquals(3, len(inventory.items('etcd'))) @@ -1068,26 +1074,6 @@ class AttendedCliTests(OOCliFixture): self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=True') - #interactive 3.0 install confirm no HA hints - @patch('ooinstall.openshift_ansible.run_main_playbook') - @patch('ooinstall.openshift_ansible.load_system_facts') - def test_ha_hint(self, load_facts_mock, run_playbook_mock): - load_facts_mock.return_value = (MOCK_FACTS, 0) - run_playbook_mock.return_value = 0 - - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False)], - ssh_user='root', - variant_num=3, - confirm_facts='y', - storage='10.1.0.1',) - self.cli_args.append("install") - result = self.runner.invoke(cli.cli, self.cli_args, - input=cli_input) - self.assert_result(result, 0) - print result.output - self.assertTrue("NOTE: Add a total of 3 or more Masters to perform an HA installation." 
- not in result.output) @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') @@ -1122,9 +1108,9 @@ class AttendedCliTests(OOCliFixture): self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False') self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2', - 'openshift_schedulable') + 'openshift_schedulable=True') self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3', - 'openshift_schedulable') + 'openshift_schedulable=True') # TODO: test with config file, attended add node diff --git a/utils/test/fixture.py b/utils/test/fixture.py index 006df739b..a883e5c56 100644 --- a/utils/test/fixture.py +++ b/utils/test/fixture.py @@ -10,10 +10,11 @@ from click.testing import CliRunner # Substitute in a product name before use: SAMPLE_CONFIG = """ variant: %s -variant_version: 3.2 -ansible_ssh_user: root +variant_version: 3.3 master_routingconfig_subdomain: example.com +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: 10.0.0.1 ip: 10.0.0.1 @@ -137,15 +138,19 @@ class OOCliFixture(OOInstallFixture): written_config = read_yaml(config_file) self._verify_config_hosts(written_config, exp_hosts_len) - self.assert_result(result, 0) - self._verify_load_facts(load_facts_mock) - self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len) - - # Make sure we ran on the expected masters and nodes: - hosts = run_playbook_mock.call_args[0][1] - hosts_to_run_on = run_playbook_mock.call_args[0][2] - self.assertEquals(exp_hosts_len, len(hosts)) - self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) + if "Uninstalled" in result.output: + # verify we exited on seeing uninstalled hosts + self.assertEqual(result.exit_code, 1) + else: + self.assert_result(result, 0) + self._verify_load_facts(load_facts_mock) + self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len) + + # Make sure we ran on the expected masters and nodes: + hosts = run_playbook_mock.call_args[0][1] + hosts_to_run_on = run_playbook_mock.call_args[0][2] + self.assertEquals(exp_hosts_len, len(hosts)) + self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) #pylint: disable=too-many-arguments,too-many-branches,too-many-statements diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py index c19fe9e0d..b5068cc14 100644 --- a/utils/test/oo_config_tests.py +++ b/utils/test/oo_config_tests.py @@ -12,9 +12,10 @@ from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError SAMPLE_CONFIG = """ variant: openshift-enterprise -variant_version: 3.2 -ansible_ssh_user: root +variant_version: 3.3 +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: master-private.example.com ip: 10.0.0.1 @@ -43,28 +44,11 @@ deployment: node: """ -# Used to test automatic upgrading of config: -LEGACY_CONFIG = """ -Description: This is the configuration file for the OpenShift Ansible-Based Installer. 
-Name: OpenShift Ansible-Based Installer Configuration -Subscription: {type: none} -Vendor: OpenShift Community -Version: 0.0.1 -ansible_config: /tmp/notreal/ansible.cfg -ansible_inventory_directory: /tmp/notreal/.config/openshift/.ansible -ansible_log_path: /tmp/ansible.log -ansible_plugins_directory: /tmp/notreal/.python-eggs/ooinstall-3.0.0-py2.7.egg-tmp/ooinstall/ansible_plugins -masters: [10.0.0.1] -nodes: [10.0.0.2, 10.0.0.3] -validated_facts: - 10.0.0.1: {hostname: master-private.example.com, ip: 10.0.0.1, public_hostname: master.example.com, public_ip: 24.222.0.1} - 10.0.0.2: {hostname: node1-private.example.com, ip: 10.0.0.2, public_hostname: node1.example.com, public_ip: 24.222.0.2} - 10.0.0.3: {hostname: node2-private.example.com, ip: 10.0.0.3, public_hostname: node2.example.com, public_ip: 24.222.0.3} -""" - CONFIG_INCOMPLETE_FACTS = """ +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: 10.0.0.1 ip: 10.0.0.1 @@ -90,8 +74,9 @@ deployment: CONFIG_BAD = """ variant: openshift-enterprise -ansible_ssh_user: root +version: v2 deployment: + ansible_ssh_user: root hosts: - connect_to: master-private.example.com ip: 10.0.0.1 @@ -212,7 +197,7 @@ class OOConfigTests(OOInstallFixture): self.assertTrue('hostname' in h) self.assertTrue('public_hostname' in h) - self.assertTrue('ansible_ssh_user' in written_config) + self.assertTrue('ansible_ssh_user' in written_config['deployment']) self.assertTrue('variant' in written_config) self.assertEquals('v2', written_config['version']) |
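Taken together, the oo_config.py changes above retire the legacy and v1 installer config formats in favour of a "version: v2" layout: per-deployment settings (ansible_ssh_user, the host list with explicit roles, and per-role variables) now live under a deployment: key, and _upgrade_v1_config rewrites v1 files into that shape on load, while a file with no version key at all is reported as a legacy configuration and the installer exits instead of upgrading it in place as the removed _upgrade_legacy_config helper used to do. For reference, a minimal v2 config in the shape the upgrade path and the SAMPLE_CONFIG test fixtures produce might look like the following; the hostnames and addresses are illustrative only:

    variant: openshift-enterprise
    variant_version: 3.3
    version: v2
    deployment:
      ansible_ssh_user: root
      hosts:
      - connect_to: master-private.example.com
        ip: 10.0.0.1
        hostname: master-private.example.com
        public_ip: 24.222.0.1
        public_hostname: master.example.com
        roles:
        - master
        - node
      - connect_to: node1-private.example.com
        ip: 10.0.0.2
        hostname: node1-private.example.com
        public_ip: 24.222.0.2
        public_hostname: node1.example.com
        roles:
        - node
      roles:
        master:
        node:

Per-host extras such as node_labels or other_variables, when present, are serialized alongside the host entry by Host.to_dict(), so they round-trip through the same deployment.hosts list.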
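The reworked generate_inventory in openshift_ansible.py likewise writes one inventory group per role found in CFG.deployment.roles, translating role names through ROLES_TO_GROUPS_MAP (master -> masters, node -> nodes, etcd -> etcd, storage -> nfs, master_lb -> lb) instead of special-casing proxy and storage hosts. Assuming the two-host v2 config sketched above, the inventory it writes would look roughly like this; the exact [OSEv3:vars] entries and per-host facts depend on the configured variables and detected facts, so treat it as a sketch rather than verbatim output:

    [OSEv3:children]
    masters
    nodes

    [OSEv3:vars]
    ansible_ssh_user=root
    deployment_type=openshift-enterprise

    [masters]
    master-private.example.com openshift_ip=10.0.0.1 openshift_public_ip=24.222.0.1 openshift_hostname=master-private.example.com openshift_public_hostname=master.example.com

    [nodes]
    master-private.example.com openshift_ip=10.0.0.1 openshift_public_ip=24.222.0.1 openshift_hostname=master-private.example.com openshift_public_hostname=master.example.com openshift_schedulable=False
    node1-private.example.com openshift_ip=10.0.0.2 openshift_public_ip=24.222.0.2 openshift_hostname=node1-private.example.com openshift_public_hostname=node1.example.com

This mirrors the updated cli_installer_tests.py expectations: a master that is also a node appears in [nodes] with openshift_schedulable=False, while dedicated nodes are not tagged openshift_schedulable=True and fall back to the default scheduling behaviour.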