12 files changed, 219 insertions, 78 deletions
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 834461e14..381e3ed8f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,6 +1,6 @@
 - name: Check for appropriate Docker versions
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
   roles:
   - openshift_facts
   tasks:
@@ -19,19 +19,19 @@
 # don't want to carry on, potentially taking out every node. The playbook can safely be re-run
 # and will not take any action on a node already running the requested docker version.
 - name: Evacuate and upgrade nodes
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
   serial: 1
   any_errors_fatal: true
   tasks:
   - name: Prepare for Node evacuation
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade

   - name: Evacuate Node for Kubelet upgrade
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
@@ -40,7 +40,7 @@
   - name: Set node schedulability
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: openshift.node.schedulable | bool
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5f008a045..74147fe01 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -212,7 +212,7 @@
   - name: Determine if node is currently scheduleable
     command: >
       {{ openshift.common.client_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
-      get node {{ openshift.node.nodename }} -o json
+      get node {{ openshift.common.hostname | lower }} -o json
     register: node_output
     when: openshift_certificates_redeploy_ca | default(false) | bool
     delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -225,7 +225,7 @@
   - name: Prepare for node evacuation
     command: >
       {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
-      manage-node {{ openshift.node.nodename }}
+      manage-node {{ openshift.common.hostname | lower }}
       --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -233,7 +233,7 @@
   - name: Evacuate node
     command: >
       {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
-      manage-node {{ openshift.node.nodename }}
+      manage-node {{ openshift.common.hostname | lower }}
       --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -241,7 +241,7 @@
   - name: Set node schedulability
     command: >
       {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
-      manage-node {{ openshift.node.nodename }} --schedulable=true
+      manage-node {{ openshift.common.hostname | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
new file mode 120000
index 000000000..514526fe2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openvswitch-avoid-oom.conf
\ No newline at end of file
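Every hunk above makes the same substitution: nodes are addressed as `openshift.common.hostname | lower` rather than via the removed `openshift.node.nodename` fact. A minimal Python sketch of that normalization (the helper name is invented for illustration; the playbooks do this inline with Jinja2's `lower` filter):

    # Sketch: what `{{ openshift.common.hostname | lower }}` evaluates to.
    # normalize_nodename is a hypothetical helper, not part of openshift-ansible.
    def normalize_nodename(hostname):
        """Lowercase the hostname fact before handing it to `oc adm manage-node`,
        since Kubernetes registers node names in lowercase."""
        return hostname.lower()

    assert normalize_nodename("Node-01.Example.COM") == "node-01.example.com"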
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 1f314c854..e66344f99 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -17,7 +17,7 @@
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
   - name: Determine if node is currently scheduleable
     command: >
-      {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+      {{ openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} -o json
     register: node_output
     delegate_to: "{{ groups.oo_first_master.0 }}"
     changed_when: false
@@ -29,7 +29,7 @@
   - name: Mark unschedulable if host is a node
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
   # NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -41,7 +41,7 @@
   - name: Evacuate Node for Kubelet upgrade
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
   tasks:
@@ -64,12 +64,10 @@
   - name: Set node schedulability
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
     register: node_sched
     until: node_sched.rc == 0
     retries: 3
     delay: 1
-
-
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 20ce47c07..458cf5ac7 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -45,7 +45,7 @@ parameters:
   node_port_incoming:
     type: string
     label: Source of node port connections
-    description: Authorized sources targeting node ports
+    description: Authorized sources targetting node ports
     default: 0.0.0.0/0

   num_etcd:
@@ -88,6 +88,11 @@ parameters:
     label: Infra image
     description: Name of the image for the infra node servers

+  dns_image:
+    type: string
+    label: DNS image
+    description: Name of the image for the DNS server
+
   etcd_flavor:
     type: string
     label: Etcd flavor
@@ -108,6 +113,11 @@ parameters:
     label: Infra flavor
     description: Flavor of the infra node servers

+  dns_flavor:
+    type: string
+    label: DNS flavor
+    description: Flavor of the DNS server
+
 outputs:

   etcd_names:
@@ -158,6 +168,26 @@ outputs:
     description: Floating IPs of the nodes
     value: { get_attr: [ infra_nodes, floating_ip ] }

+  dns_name:
+    description: Name of the DNS
+    value:
+      get_attr:
+        - dns
+        - name
+
+  dns_floating_ip:
+    description: Floating IP of the DNS
+    value:
+      get_attr:
+        - dns
+        - addresses
+        - str_replace:
+            template: openshift-ansible-cluster_id-net
+            params:
+              cluster_id: { get_param: cluster_id }
+        - 1
+        - addr
+
 resources:

   net:
@@ -183,7 +213,22 @@ resources:
           template: subnet_24_prefix.0/24
          params:
            subnet_24_prefix: { get_param: subnet_24_prefix }
-      dns_nameservers: { get_param: dns_nameservers }
+      allocation_pools:
+        - start:
+            str_replace:
+              template: subnet_24_prefix.3
+              params:
+                subnet_24_prefix: { get_param: subnet_24_prefix }
+          end:
+            str_replace:
+              template: subnet_24_prefix.254
+              params:
+                subnet_24_prefix: { get_param: subnet_24_prefix }
+      dns_nameservers:
+        - str_replace:
+            template: subnet_24_prefix.2
+            params:
+              subnet_24_prefix: { get_param: subnet_24_prefix }

   router:
     type: OS::Neutron::Router
@@ -383,6 +428,44 @@ resources:
           port_range_min: 443
           port_range_max: 443

+  dns-secgrp:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-dns-secgrp
+          params:
+            cluster_id: { get_param: cluster_id }
+      description:
+        str_replace:
+          template: Security group for cluster_id cluster DNS
+          params:
+            cluster_id: { get_param: cluster_id }
+      rules:
+        - direction: ingress
+          protocol: tcp
+          port_range_min: 22
+          port_range_max: 22
+          remote_ip_prefix: { get_param: ssh_incoming }
+        - direction: ingress
+          protocol: udp
+          port_range_min: 53
+          port_range_max: 53
+          remote_mode: remote_group_id
+          remote_group_id: { get_resource: etcd-secgrp }
+        - direction: ingress
+          protocol: udp
+          port_range_min: 53
+          port_range_max: 53
+          remote_mode: remote_group_id
+          remote_group_id: { get_resource: master-secgrp }
+        - direction: ingress
+          protocol: udp
+          port_range_min: 53
+          port_range_max: 53
+          remote_mode: remote_group_id
+          remote_group_id: { get_resource: node-secgrp }
+
   etcd:
     type: OS::Heat::ResourceGroup
     properties:
@@ -516,3 +599,79 @@ resources:
             cluster_id: { get_param: cluster_id }
     depends_on:
       - interface
+
+  dns:
+    type: OS::Nova::Server
+    properties:
+      name:
+        str_replace:
+          template: cluster_id-dns
+          params:
+            cluster_id: { get_param: cluster_id }
+      key_name: { get_resource: keypair }
+      image: { get_param: dns_image }
+      flavor: { get_param: dns_flavor }
+      networks:
+        - port: { get_resource: dns-port }
+      user_data: { get_resource: dns-config }
+      user_data_format: RAW
+
+  dns-port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: net }
+      fixed_ips:
+        - subnet: { get_resource: subnet }
+          ip_address:
+            str_replace:
+              template: subnet_24_prefix.2
+              params:
+                subnet_24_prefix: { get_param: subnet_24_prefix }
+      security_groups:
+        - { get_resource: dns-secgrp }
+
+  dns-floating-ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: external_net }
+      port_id: { get_resource: dns-port }
+
+  dns-config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+        - config:
+            str_replace:
+              template: |
+                #cloud-config
+                disable_root: true
+
+                system_info:
+                  default_user:
+                    name: openshift
+                    sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+                write_files:
+                  - path: /etc/sudoers.d/00-openshift-no-requiretty
+                    permissions: 440
+                    content: |
+                      Defaults:openshift !requiretty
+                  - path: /etc/sysconfig/network-scripts/ifcfg-eth0
+                    content: |
+                      DEVICE="eth0"
+                      BOOTPROTO="dhcp"
+                      DNS1="$dns1"
+                      DNS2="$dns2"
+                      PEERDNS="no"
+                      ONBOOT="yes"
+                runcmd:
+                  - [ "/usr/bin/systemctl", "restart", "network" ]
+              params:
+                $dns1:
+                  get_param:
+                    - dns_nameservers
+                    - 0
+                $dns2:
+                  get_param:
+                    - dns_nameservers
+                    - 1
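The subnet rework above pins the cluster DNS server at host .2 of the /24 (it serves as both the subnet's nameserver and the fixed IP on dns-port) and restricts Neutron's allocation pool to .3 through .254. A quick Python check of the addresses the str_replace templates produce, assuming subnet_24_prefix carries the first three octets (e.g. 192.168.23):

    import ipaddress

    # Assumption: subnet_24_prefix holds the first three octets, e.g. "192.168.23".
    subnet_24_prefix = "192.168.23"

    cidr = ipaddress.ip_network(subnet_24_prefix + ".0/24")
    dns_ip = subnet_24_prefix + ".2"      # nameserver and fixed IP of the DNS port
    pool = (subnet_24_prefix + ".3",      # Neutron hands out .3 ...
            subnet_24_prefix + ".254")    # ... through .254

    # .0 (network), .1 (router) and .2 (DNS) stay outside the pool.
    for addr in (dns_ip,) + pool:
        assert ipaddress.ip_address(addr) in cidr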
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
index 435139849..f83f2c984 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
@@ -107,7 +107,7 @@ resources:
       flavor: { get_param: flavor }
       networks:
         - port: { get_resource: port }
-      user_data: { get_resource: config }
+      user_data: { get_file: user-data }
       user_data_format: RAW
       metadata:
         environment: { get_param: cluster_env }
@@ -128,25 +128,3 @@ resources:
     properties:
       floating_network: { get_param: floating_network }
       port_id: { get_resource: port }
-
-  config:
-    type: OS::Heat::CloudConfig
-    properties:
-      cloud_config:
-        disable_root: true
-
-        hostname: { get_param: name }
-
-        system_info:
-          default_user:
-            name: openshift
-            sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
-        write_files:
-          - path: /etc/sudoers.d/00-openshift-no-requiretty
-            permissions: 440
-            # content: Defaults:openshift !requiretty
-            # Encoded in base64 to be sure that we do not forget the trailing newline or
-            # sudo will not be able to parse that file
-            encoding: b64
-            content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==
diff --git a/playbooks/openstack/openshift-cluster/files/user-data b/playbooks/openstack/openshift-cluster/files/user-data
new file mode 100644
index 000000000..eb65f7cec
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/files/user-data
@@ -0,0 +1,13 @@
+#cloud-config
+disable_root: true
+
+system_info:
+  default_user:
+    name: openshift
+    sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+write_files:
+  - path: /etc/sudoers.d/00-openshift-no-requiretty
+    permissions: 440
+    content: |
+      Defaults:openshift !requiretty
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index f5b16fb76..bf400cfe8 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Install flannel
   become: yes
   action: "{{ ansible_pkg_mgr }} name=flannel state=present"
-  when: not openshift.common.is_containerized | bool
+  when: not openshift.common.is_atomic | bool

 - name: Set flannel etcd options
   become: yes
@@ -15,7 +15,7 @@
     - { regexp: "^(FLANNEL_ETCD=)", line: '\1{{ etcd_hosts|join(",") }}' }
     - { regexp: "^(FLANNEL_ETCD_ENDPOINTS=)", line: '\1{{ etcd_hosts|join(",") }}' }
     - { regexp: "^(FLANNEL_ETCD_KEY=)", line: '\1{{ flannel_etcd_key }}' }
-    - { regexp: "^(FLANNEL_ETCD_KEY_PREFIX=)", line: '\1{{ flannel_etcd_key }}' }
+    - { regexp: "^(FLANNEL_ETCD_PREFIX=)", line: '\1{{ flannel_etcd_key }}' }

 - name: Set flannel options
   become: yes
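The deleted OS::Heat::CloudConfig block had to base64-encode the sudoers rule to protect the trailing newline; the new shared user-data file can write it with a literal block scalar instead. The two encodings are byte-identical, which is easy to verify:

    import base64

    # The b64 payload from the removed template decodes to the sudoers rule
    # plus the trailing newline that sudo needs to parse the file.
    decoded = base64.b64decode("RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==")
    assert decoded == b"Defaults:openshift !requiretty\n"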
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 769d11a42..537b6c60f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -148,7 +148,6 @@ def hostname_valid(hostname):
     if (not hostname or
             hostname.startswith('localhost') or
             hostname.endswith('localdomain') or
-            hostname.endswith('novalocal') or
             len(hostname.split('.')) < 2):
         return False

@@ -363,15 +362,12 @@ def normalize_openstack_facts(metadata, facts):
     facts['network']['ip'] = local_ipv4
     facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']

-    for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
-                                 ('public_hostname', 'public-hostname', 'public-ipv4')]:
-        try:
-            if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
-                facts['network'][f_var] = metadata['ec2_compat'][h_var]
-            else:
-                facts['network'][f_var] = metadata['ec2_compat'][ip_var]
-        except socket.gaierror:
-            facts['network'][f_var] = metadata['ec2_compat'][ip_var]
+    # TODO: verify local hostname makes sense and is resolvable
+    facts['network']['hostname'] = metadata['hostname']
+
+    # TODO: verify that public hostname makes sense and is resolvable
+    pub_h = metadata['ec2_compat']['public-hostname']
+    facts['network']['public_hostname'] = pub_h

     return facts

@@ -903,8 +899,8 @@ def set_sdn_facts_if_unset(facts, system_facts):
     if 'master' in facts:
         # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
         # these might be overridden if they exist in the master config file
-        facts['master']['sdn_cluster_network_cidr'] = '10.128.0.0/14'
-        facts['master']['sdn_host_subnet_length'] = '9'
+        sdn_cluster_network_cidr = '10.128.0.0/14'
+        sdn_host_subnet_length = '9'

         master_cfg_path = os.path.join(facts['common']['config_base'],
                                        'master/master-config.yaml')
@@ -914,12 +910,17 @@ def set_sdn_facts_if_unset(facts, system_facts):

             if 'networkConfig' in config:
                 if 'clusterNetworkCIDR' in config['networkConfig']:
-                    facts['master']['sdn_cluster_network_cidr'] = \
+                    sdn_cluster_network_cidr = \
                         config['networkConfig']['clusterNetworkCIDR']
                 if 'hostSubnetLength' in config['networkConfig']:
-                    facts['master']['sdn_host_subnet_length'] = \
+                    sdn_host_subnet_length = \
                         config['networkConfig']['hostSubnetLength']

+        if 'sdn_cluster_network_cidr' not in facts['master']:
+            facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
+        if 'sdn_host_subnet_length' not in facts['master']:
+            facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length
+
     if 'node' in facts and 'sdn_mtu' not in facts['node']:
         node_ip = facts['common']['ip']

@@ -935,14 +936,6 @@ def set_sdn_facts_if_unset(facts, system_facts):

     return facts

-def set_nodename(facts):
-    if 'node' in facts and 'common' in facts:
-        if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
-            facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
-        else:
-            facts['node']['nodename'] = facts['common']['hostname'].lower()
-    return facts
-
 def migrate_oauth_template_facts(facts):
     """
     Migrate an old oauth template fact to a newer format if it's present.
@@ -1319,7 +1312,7 @@ def apply_provider_facts(facts, provider_facts):

         facts['common'][h_var] = choose_hostname(
             [provider_facts['network'].get(h_var)],
-            facts['common'][h_var]
+            facts['common'][ip_var]
         )

     facts['provider'] = provider_facts
@@ -1527,8 +1520,8 @@ def set_proxy_facts(facts):
                safe_get_bool(common['generate_no_proxy_hosts']):
                 if 'no_proxy_internal_hostnames' in common:
                     common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
-            common['no_proxy'].append('.' + common['dns_domain'])
-            # We always add ourselves no matter what
+            # We always add local dns domain and ourselves no matter what
+            common['no_proxy'].append('.' + common['dns_domain'])
             common['no_proxy'].append(common['hostname'])
             common['no_proxy'] = sort_unique(common['no_proxy'])
         facts['common'] = common
@@ -1802,7 +1795,6 @@ class OpenShiftFacts(object):
             facts = set_proxy_facts(facts)
             if not safe_get_bool(facts['common']['is_containerized']):
                 facts = set_installed_variant_rpm_facts(facts)
-            facts = set_nodename(facts)
             return dict(openshift=facts)

     def get_defaults(self, roles, deployment_type, deployment_subtype):
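The set_sdn_facts_if_unset rework stages the defaults (and any values read from master-config.yaml) in locals, then writes them into facts['master'] only when the keys are absent, so facts supplied by the inventory are no longer clobbered on every run. A standalone sketch of that set-if-unset pattern, with a plain dict standing in for the real facts structure:

    # Sketch of the "set only if unset" pattern; master_facts stands in for
    # facts['master'], and the config-file override step is omitted.
    def set_sdn_defaults_if_unset(master_facts,
                                  cidr='10.128.0.0/14',
                                  subnet_length='9'):
        if 'sdn_cluster_network_cidr' not in master_facts:
            master_facts['sdn_cluster_network_cidr'] = cidr
        if 'sdn_host_subnet_length' not in master_facts:
            master_facts['sdn_host_subnet_length'] = subnet_length
        return master_facts

    # A value supplied up front survives; the missing key gets the default.
    facts = set_sdn_defaults_if_unset({'sdn_host_subnet_length': '8'})
    assert facts['sdn_host_subnet_length'] == '8'
    assert facts['sdn_cluster_network_cidr'] == '10.128.0.0/14'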
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index 28e4e46e9..a1f42f8c4 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -14,7 +14,7 @@

 - name: Wait for Node Registration
   command: >
-    {{ openshift.common.client_binary }} get node {{ hostvars[item].openshift.node.nodename }}
+    {{ openshift.common.client_binary }} get node {{ hostvars[item].openshift.common.hostname }}
     --config={{ openshift_manage_node_kubeconfig }}
     -n default
   register: omd_get_node
@@ -26,19 +26,19 @@

 - name: Set node schedulability
   command: >
-    {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }}
+    {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.common.hostname | lower }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }}
     --config={{ openshift_manage_node_kubeconfig }}
     -n default
   with_items: "{{ openshift_nodes }}"
-  when: hostvars[item].openshift.node.nodename is defined
+  when: hostvars[item].openshift.common.hostname is defined

 - name: Label nodes
   command: >
-    {{ openshift.common.client_binary }} label --overwrite node {{ hostvars[item].openshift.node.nodename }} {{ hostvars[item].openshift.node.labels | oo_combine_dict }}
+    {{ openshift.common.client_binary }} label --overwrite node {{ hostvars[item].openshift.common.hostname | lower }} {{ hostvars[item].openshift.node.labels | oo_combine_dict }}
     --config={{ openshift_manage_node_kubeconfig }}
     -n default
   with_items: "{{ openshift_nodes }}"
-  when: hostvars[item].openshift.node.nodename is defined and 'labels' in hostvars[item].openshift.node and hostvars[item].openshift.node.labels != {}
+  when: hostvars[item].openshift.common.hostname is defined and 'labels' in hostvars[item].openshift.node and hostvars[item].openshift.node.labels != {}

 - name: Delete temp directory
   file:
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 9bcaf4d84..68d153052 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -33,7 +33,7 @@ networkConfig:
 {% if openshift.node.set_node_ip | bool %}
 nodeIP: {{ openshift.common.ip }}
 {% endif %}
-nodeName: {{ openshift.node.nodename }}
+nodeName: {{ openshift.common.hostname | lower }}
 podManifestConfig:
 servingInfo:
   bindAddress: 0.0.0.0:10250
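The Label nodes task passes openshift.node.labels through the repository's oo_combine_dict filter before handing it to oc label. Assuming the filter's default separators flatten a dict into space-separated key=value pairs, the behaviour is roughly:

    # Rough sketch of what oo_combine_dict is assumed to produce with its
    # defaults; sorted() is only for a stable output in this example.
    def combine_dict(data, in_joiner='=', out_joiner=' '):
        return out_joiner.join(in_joiner.join((k, str(v)))
                               for k, v in sorted(data.items()))

    labels = {'region': 'infra', 'zone': 'default'}
    # e.g.: oc label --overwrite node <name> region=infra zone=default
    assert combine_dict(labels) == 'region=infra zone=default'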
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 286e50f64..8470fb527 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -739,17 +739,17 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
     installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts,
                                                              callback_facts)
     nodes = [host for host in oo_cfg.deployment.hosts if host.is_node()]
-    not_balancers = [host for host in oo_cfg.deployment.hosts if not host.is_master_lb()]
+    masters_and_nodes = [host for host in oo_cfg.deployment.hosts if host.is_master() or host.is_node()]

     in_hosts = [str(h) for h in installed_hosts]
     un_hosts = [str(h) for h in uninstalled_hosts]
     all_hosts = [str(h) for h in oo_cfg.deployment.hosts]
-    no_bals = [str(h) for h in not_balancers]
+    m_and_n = [str(h) for h in masters_and_nodes]

     INSTALLER_LOG.debug("installed hosts: %s", ", ".join(in_hosts))
     INSTALLER_LOG.debug("uninstalled hosts: %s", ", ".join(un_hosts))
     INSTALLER_LOG.debug("deployment hosts: %s", ", ".join(all_hosts))
-    INSTALLER_LOG.debug("not balancers: %s", ", ".join(no_bals))
+    INSTALLER_LOG.debug("masters and nodes: %s", ", ".join(m_and_n))

     # Case (1): All uninstalled hosts
     if len(uninstalled_hosts) == len(nodes):
@@ -757,7 +757,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
         hosts_to_run_on = list(oo_cfg.deployment.hosts)
     else:
         # Case (2): All installed hosts
-        if len(installed_hosts) == len(not_balancers):
+        if len(installed_hosts) == len(masters_and_nodes):
             message = """
 All specified hosts in specified environment are installed.
 """
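The installer change narrows case (2): a deployment now counts as fully installed when every master and node is installed, rather than every non-load-balancer host — a set that could also contain hosts that are neither, which kept the counts from ever matching. A toy illustration with a stub Host class (the real class lives in utils/src/ooinstall; the roles here are invented for the example):

    # Stub illustrating the difference between the two filters; 'storage'
    # stands for any host that is neither a master/node nor a balancer.
    class Host:
        def __init__(self, name, roles):
            self.name, self.roles = name, set(roles)
        def is_node(self): return 'node' in self.roles
        def is_master(self): return 'master' in self.roles
        def is_master_lb(self): return 'master_lb' in self.roles

    hosts = [Host('m1', ['master', 'node']),
             Host('lb1', ['master_lb']),
             Host('s1', ['storage'])]

    not_balancers = [h for h in hosts if not h.is_master_lb()]
    masters_and_nodes = [h for h in hosts if h.is_master() or h.is_node()]

    # The storage host inflates the old count but not the new one.
    assert len(not_balancers) == 2
    assert len(masters_and_nodes) == 1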