Diffstat (limited to 'utils')
-rw-r--r--  utils/setup.py                                  |  5
-rw-r--r--  utils/src/data/data_file                        |  1
-rw-r--r--  utils/src/ooinstall/cli_installer.py            | 79
-rw-r--r--  utils/src/ooinstall/oo_config.py                |  4
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py        |  8
-rw-r--r--  utils/test/test_utils.py                        | 30
-rw-r--r--  utils/workflows/enterprise_deploy/openshift.sh  |  2
7 files changed, 103 insertions, 26 deletions
diff --git a/utils/setup.py b/utils/setup.py
index 563897bb1..7909321c9 100644
--- a/utils/setup.py
+++ b/utils/setup.py
@@ -65,11 +65,6 @@ setup(
         'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
     },
 
-    # Although 'package_data' is the preferred approach, in some case you may
-    # need to place data files outside of your packages. See:
-    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
-    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
-    #data_files=[('my_data', ['data/data_file'])],
     tests_require=['nose'],
 
     test_suite='nose.collector',
diff --git a/utils/src/data/data_file b/utils/src/data/data_file
deleted file mode 100644
index 7c0646bfd..000000000
--- a/utils/src/data/data_file
+++ /dev/null
@@ -1 +0,0 @@
-some data
\ No newline at end of file
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 7c7770207..7e5ad4144 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -43,6 +43,16 @@ UPGRADE_MAPPINGS = {
         'major_playbook': 'v3_3/upgrade.yml',
         'major_version': '3.3',
     },
+    '3.3': {
+        'minor_version': '3.3',
+        'minor_playbook': 'v3_3/upgrade.yml',
+        'major_playbook': 'v3_4/upgrade.yml',
+        'major_version': '3.4',
+    },
+    '3.4': {
+        'minor_version': '3.4',
+        'minor_playbook': 'v3_4/upgrade.yml',
+    },
 }
 
 
@@ -108,11 +118,6 @@ a high-availability (HA) deployment. If you choose an HA deployment, then
 you are prompted to identify a *separate* system to act as the load balancer
 for your cluster once you define all masters and nodes.
 
-If only one master is specified, an etcd instance is embedded within the
-OpenShift master service to use as the datastore. This can be later replaced
-with a separate etcd instance, if required. If multiple masters are specified,
-then a separate etcd cluster is configured with each master serving as a member.
-
 Any masters configured as part of this installation process are also
 configured as nodes. This enables the master to proxy to pods from the API.
 By default, this node is unschedulable, but this can be changed
@@ -170,10 +175,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
         if masters_set or num_masters != 2:
             more_hosts = click.confirm('Do you want to add additional hosts?')
 
-    master_lb = collect_master_lb(hosts)
-    if master_lb:
-        hosts.append(master_lb)
-        roles.add('master_lb')
+    if num_masters > 2:
+        master_lb = collect_master_lb(hosts)
+        if master_lb:
+            hosts.append(master_lb)
+            roles.add('master_lb')
+    else:
+        set_cluster_hostname(oo_cfg)
 
     if not existing_env:
         collect_storage_host(hosts)
@@ -253,11 +261,8 @@ def print_host_summary(all_hosts, host):
             click.echo(" - Load Balancer (Preconfigured)")
         else:
             click.echo(" - Load Balancer (HAProxy)")
-    if host.is_master():
-        if host.is_etcd_member(all_hosts):
-            click.echo(" - Etcd Member")
-        else:
-            click.echo(" - Etcd (Embedded)")
+    if host.is_etcd():
+        click.echo(" - Etcd")
     if host.is_storage():
         click.echo(" - Storage")
     if host.new_host:
@@ -300,8 +305,7 @@ hostname.
         return hostname
 
     lb_hostname = click.prompt('Enter hostname or IP address',
-                               value_proc=validate_prompt_lb,
-                               default='')
+                               value_proc=validate_prompt_lb)
     if lb_hostname:
         host_props['connect_to'] = lb_hostname
         install_haproxy = \
@@ -313,6 +317,24 @@ hostname.
     return None
 
 
+def set_cluster_hostname(oo_cfg):
+    first_master = next((host for host in oo_cfg.deployment.hosts if host.is_master()), None)
+    message = """
+You have chosen to install a single master cluster (non-HA).
+
+In a single master cluster, the cluster host name (Ansible variable openshift_master_cluster_public_hostname) is set by default to the host name of the single master. In a multiple master (HA) cluster, the FQDN of a host must be provided that will be configured as a proxy. This could be either an existing load balancer configured to balance all masters on
+port 8443 or a new host that would have HAProxy installed on it.
+
+(Optional)
+If you want to override the cluster host name now to something other than the default (the host name of the single master), or if you think you might add masters later to become an HA cluster and want to future proof your cluster host name choice, please provide a FQDN. Otherwise, press ENTER to continue and accept the default.
+""" + click.echo(message) + cluster_hostname = click.prompt('Enter hostname or IP address', + default=str(first_master)) + oo_cfg.deployment.variables['openshift_master_cluster_hostname'] = cluster_hostname + oo_cfg.deployment.variables['openshift_master_cluster_public_hostname'] = cluster_hostname + + def collect_storage_host(hosts): """ Get a valid host for storage from the user and append it to the list of @@ -683,8 +705,10 @@ def get_installed_hosts(hosts, callback_facts): for host in [h for h in hosts if h.is_master() or h.is_node()]: if host.connect_to in callback_facts.keys(): if is_installed_host(host, callback_facts): + INSTALLER_LOG.debug("%s is already installed", str(host)) installed_hosts.append(host) else: + INSTALLER_LOG.debug("%s is not installed", str(host)) uninstalled_hosts.append(host) return installed_hosts, uninstalled_hosts @@ -717,6 +741,17 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts, callback_facts) nodes = [host for host in oo_cfg.deployment.hosts if host.is_node()] + masters_and_nodes = [host for host in oo_cfg.deployment.hosts if host.is_master() or host.is_node()] + + in_hosts = [str(h) for h in installed_hosts] + un_hosts = [str(h) for h in uninstalled_hosts] + all_hosts = [str(h) for h in oo_cfg.deployment.hosts] + m_and_n = [str(h) for h in masters_and_nodes] + + INSTALLER_LOG.debug("installed hosts: %s", ", ".join(in_hosts)) + INSTALLER_LOG.debug("uninstalled hosts: %s", ", ".join(un_hosts)) + INSTALLER_LOG.debug("deployment hosts: %s", ", ".join(all_hosts)) + INSTALLER_LOG.debug("masters and nodes: %s", ", ".join(m_and_n)) # Case (1): All uninstalled hosts if len(uninstalled_hosts) == len(nodes): @@ -724,7 +759,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): hosts_to_run_on = list(oo_cfg.deployment.hosts) else: # Case (2): All installed hosts - if len(installed_hosts) == len(list(oo_cfg.deployment.hosts)): + if len(installed_hosts) == len(masters_and_nodes): message = """ All specified hosts in specified environment are installed. """ @@ -735,6 +770,16 @@ A mix of installed and uninstalled hosts have been detected in your environment. Please make sure your environment was installed successfully before adding new nodes. """ + # Still inside the case 2/3 else condition + mixed_msg = """ +\tInstalled hosts: +\t\t{inst_hosts} + +\tUninstalled hosts: +\t\t{uninst_hosts}""".format(inst_hosts=", ".join(in_hosts), uninst_hosts=", ".join(un_hosts)) + click.echo(mixed_msg) + + # Out of the case 2/3 if/else click.echo(message) if not unattended: diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index e6bff7133..64eb340f3 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -120,6 +120,10 @@ class Host(object): def is_storage(self): return 'storage' in self.roles + def is_etcd(self): + """ Does this host have the etcd role """ + return 'etcd' in self.roles + def is_etcd_member(self, all_hosts): """ Will this host be a member of a standalone etcd cluster. 
""" if not self.is_master(): diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 764cc1e56..f542fb376 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -314,6 +314,10 @@ def run_uninstall_playbook(hosts, verbose=False): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + # override the ansible config for our main playbook run + if 'ansible_quiet_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + return run_ansible(playbook, inventory_file, facts_env, verbose) @@ -328,4 +332,8 @@ def run_upgrade_playbook(hosts, playbook, verbose=False): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + # override the ansible config for our main playbook run + if 'ansible_quiet_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + return run_ansible(playbook, inventory_file, facts_env, verbose) diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py index 8d59f388e..2e59d86f2 100644 --- a/utils/test/test_utils.py +++ b/utils/test/test_utils.py @@ -6,7 +6,7 @@ import unittest import logging import sys import copy -from ooinstall.utils import debug_env +from ooinstall.utils import debug_env, is_valid_hostname import mock @@ -70,3 +70,31 @@ class TestUtils(unittest.TestCase): self.assertItemsEqual( self.expected, _il.debug.call_args_list) + + ###################################################################### + def test_utils_is_valid_hostname_invalid(self): + """Verify is_valid_hostname can detect None or too-long hostnames""" + # A hostname that's empty, None, or more than 255 chars is invalid + empty_hostname = '' + res = is_valid_hostname(empty_hostname) + self.assertFalse(res) + + none_hostname = None + res = is_valid_hostname(none_hostname) + self.assertFalse(res) + + too_long_hostname = "a" * 256 + res = is_valid_hostname(too_long_hostname) + self.assertFalse(res) + + def test_utils_is_valid_hostname_ends_with_dot(self): + """Verify is_valid_hostname can parse hostnames with trailing periods""" + hostname = "foo.example.com." + res = is_valid_hostname(hostname) + self.assertTrue(res) + + def test_utils_is_valid_hostname_normal_hostname(self): + """Verify is_valid_hostname can parse regular hostnames""" + hostname = "foo.example.com" + res = is_valid_hostname(hostname) + self.assertTrue(res) diff --git a/utils/workflows/enterprise_deploy/openshift.sh b/utils/workflows/enterprise_deploy/openshift.sh deleted file mode 100644 index 040a9a84d..000000000 --- a/utils/workflows/enterprise_deploy/openshift.sh +++ /dev/null @@ -1,2 +0,0 @@ -# This file is not used for OpenShift 3.0. It's merely an artifact of the the -# installation framework originally used for OpenShift 2.x. |