19 files changed, 201 insertions, 97 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index a8e890624..722fad28d 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.42-1 ./
+3.0.45-1 ./
diff --git a/README_AWS.md b/README_AWS.md
index c605de43d..4a2399d42 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -43,6 +43,16 @@ You may also want to allow access from the outside world on the following ports:
 ```
 
+Determine your subnet and setup the VPC
+---------------------------------------
+
+In the AWS VPC console, look up your subnet ID for the region you want to use and set it as such:
+
+- export ec2_vpc_subnet='my_vpc_subnet'
+
+Go to Your VPCs, select the VPC, and under Actions -> DNS Hostnames, set to Yes and Save.
+
+
 (Optional) Setup your $HOME/.ssh/config file
 -------------------------------------------
 In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use `.ssh/config`
@@ -62,7 +72,7 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne
 By default, a cluster is launched with the following configuration:
 
 - Instance type: m4.large
-- AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments)
+- AMI: ami-7a9e9812 (for online deployments, ami-61bbf104 for origin deployments and ami-10663b78 for enterprise deployments)
 - Region: us-east-1
 - Keypair name: libra
 - Security group: public
@@ -109,7 +119,6 @@ If needed, these values can be changed by setting environment variables on your
 - export ec2_region='us-east-1'
 - export ec2_keypair='libra'
 - export ec2_security_groups="['public']"
-- export ec2_vpc_subnet='my_vpc_subnet'
 - export ec2_assign_public_ip='true'
 - export os_etcd_root_vol_size='20'
 - export os_etcd_root_vol_type='standard'
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 017515045..4eef29a4d 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@ }
 
 Name:           openshift-ansible
-Version:        3.0.42
+Version:        3.0.45
 Release:        1%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
@@ -261,6 +261,40 @@ Atomic OpenShift Utilities includes
 
 %changelog
+* Mon Feb 22 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.45-1
+- Do not monitor for etcd watchers (mmahut@redhat.com)
+- remove old master registry item/triggers (jdiaz@redhat.com)
+- a-o-i: Redo logic for detecting master_lb (smunilla@redhat.com)
+- Fix 1.2 version check (jdetiber@redhat.com)
+- Fix pv/c creation failed_when. (abutcher@redhat.com)
+- Rename variable to delete temporary file, add configurable path.
+  (hrosnet@redhat.com)
+- Add /var/log to containerized node mounts (sdodson@redhat.com)
+- Add extra parameters for S3 registry: delete file, create bucket.
+  (hrosnet@redhat.com)
+- Don't make config files world readable (sdodson@redhat.com)
+- Fix requiring state and providing a default (rharriso@redhat.com)
+- bind in /etc/origin/node for non-master monitoring to be able to talk with
+  master (jdiaz@redhat.com)
+- a-o-i: pylint fixes related to too-long lines (smunilla@redhat.com)
+
+* Wed Feb 17 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.44-1
+- create registry items/triggers under Openshift Node (jdiaz@redhat.com)
+- a-o-i: Change method for counting master_lb as installed
+  (smunilla@redhat.com)
+
+* Tue Feb 16 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.43-1
+- Add default to state param (rharriso@redhat.com)
+- Add type to record_type param (rharriso@redhat.com)
+- Add types to module params (rharriso@redhat.com)
+- Adding examples to the dyn_record module (rharriso@redhat.com)
+- add item to track docker-registry pings (jdiaz@redhat.com)
+- Handle case where the user already had access to the scc
+  (bleanhar@redhat.com)
+- Refactoring the add-scc-to-user logic (bleanhar@redhat.com)
+- Apply openshift_docker to nodes during scaleup. (abutcher@redhat.com)
+- Change etcd deamon name for atomic-host (florian.lambert@enovance.com)
+
 * Tue Feb 16 2016 Joel Diaz <jdiaz@redhat.com> 3.0.42-1
 - Add gce softlink for openshift-ansible-bin
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 38ce92e92..daf84e242 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -15,6 +15,9 @@
     aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
     aws_bucket_name: "{{ aws_bucket | default(clusterid ~ '-docker') }}"
     aws_bucket_region: "{{ aws_region | default(lookup('env', 'S3_REGION') | default('us-east-1', true)) }}"
+    aws_create_bucket: "{{ aws_create | default(True) }}"
+    aws_tmp_path: "{{ aws_tmp_pathfile | default('/root/config.yml')}}"
+    aws_delete_tmp_file: "{{ aws_delete_tmp | default(True) }}"
 
   tasks:
 
@@ -30,6 +33,7 @@
       command: oc scale --replicas=0 dc/docker-registry
 
     - name: Create S3 bucket
+      when: aws_create_bucket | bool
       local_action:
         module: s3 bucket="{{ aws_bucket_name }}" mode=create
 
@@ -70,4 +74,5 @@
       command: oc scale --replicas=1 dc/docker-registry
 
     - name: Delete temporary config file
-      file: path=/root/config.yml state=absent
+      file: path={{ aws_tmp_path }} state=absent
+      when: aws_delete_tmp_file | bool
diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml
index d2ba3fc7a..c4340902b 100644
--- a/playbooks/common/openshift-cluster/scaleup.yml
+++ b/playbooks/common/openshift-cluster/scaleup.yml
@@ -1,6 +1,16 @@
 ---
 - include: evaluate_groups.yml
 
+- name: Configure docker hosts
+  hosts: oo_nodes_to_config
+  vars:
+    docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
+    docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
+    docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
+  roles:
+  - openshift_facts
+  - openshift_docker
+
 - include: ../openshift-node/config.yml
   vars:
     openshift_deployment_type: "{{ deployment_type }}"
diff --git a/roles/lib_dyn/library/dyn_record.py b/roles/lib_dyn/library/dyn_record.py
index f2796ccf2..7b80064f4 100644
--- a/roles/lib_dyn/library/dyn_record.py
+++ b/roles/lib_dyn/library/dyn_record.py
@@ -95,6 +95,26 @@
 requirements: [ dyn ]
 author: "Russell Harrison"
 '''
+EXAMPLES = '''
+- name: Update CNAME record
+  local_action:
+    module: dyn_record
+    state: present
+    record_fqdn: www.example.com
+    zone: example.com
+    record_type: CNAME
+    record_value: web1.example.com
+
+- name: Update A record
+  local_action:
+    module: dyn_record
+    state: present
+    record_fqdn: web1.example.com
+    zone: example.com
+    record_value: 10.0.0.10
+    record_type: A
+'''
+
 try:
     IMPORT_ERROR = False
     from dyn.tm.session import DynectSession
@@ -158,15 +178,15 @@ def main():
     '''Ansible module for managing Dyn DNS records.'''
     module = AnsibleModule(
         argument_spec=dict(
-            state=dict(required=True, choices=['present', 'absent', 'list']),
+            state=dict(default='present', choices=['present', 'absent', 'list']),
             customer_name=dict(default=os.environ.get('DYNECT_CUSTOMER_NAME', None), type='str'),
             user_name=dict(default=os.environ.get('DYNECT_USER_NAME', None), type='str', no_log=True),
             user_password=dict(default=os.environ.get('DYNECT_PASSWORD', None), type='str', no_log=True),
-            zone=dict(required=True),
-            record_fqdn=dict(required=False),
-            record_type=dict(required=False, choices=[
+            zone=dict(required=True, type='str'),
+            record_fqdn=dict(required=False, type='str'),
+            record_type=dict(required=False, type='str', choices=[
                 'A', 'AAAA', 'CNAME', 'PTR', 'TXT']),
-            record_value=dict(required=False),
+            record_value=dict(required=False, type='str'),
             record_ttl=dict(required=False, default=0, type='int'),
         ),
         required_together=(
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 25b9534dd..34fbe1296 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -718,7 +718,7 @@ def set_version_facts_if_unset(facts):
         if deployment_type == 'origin':
             version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
             version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
-            version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.1.2')
+            version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
         else:
             version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
             version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
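The one-line change to openshift_facts.py above is the "Fix 1.2 version check" entry from the changelog: with the old threshold, any origin release at or above 1.1.2 (including 1.1.z bugfix releases) satisfied the "3.2 or 1.2" gate. A minimal sketch with `distutils.version.LooseVersion` (the same class the module imports) shows the difference; the version string is a made-up example:

```python
from distutils.version import LooseVersion

version = '1.1.3'  # hypothetical origin 1.1.z release being checked

# Old threshold: every 1.1.z at or above 1.1.2 wrongly passed the "1.2" gate.
print(LooseVersion(version) >= LooseVersion('1.1.2'))   # True  (wrong answer)

# New threshold: only genuine 1.2 releases pass.
print(LooseVersion(version) >= LooseVersion('1.2.0'))   # False (correct)
print(LooseVersion('1.2.0') >= LooseVersion('1.2.0'))   # True
```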
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 23dfacf79..dd66eeebb 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -221,6 +221,9 @@
   template:
     dest: "{{ openshift.master.session_secrets_file }}"
     src: sessionSecretsFile.yaml.v1.j2
+    owner: root
+    group: root
+    mode: 0600
   when: openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined
   notify:
   - restart master
@@ -235,6 +238,9 @@
     dest: "{{ openshift_master_config_file }}"
     src: master.yaml.v1.j2
     backup: true
+    owner: root
+    group: root
+    mode: 0600
   notify:
   - restart master
   - restart master api
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index acf2f74e3..43253d72b 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -84,6 +84,9 @@
     dest: "{{ openshift_node_config_file }}"
     src: node.yaml.v1.j2
     backup: true
+    owner: root
+    group: root
+    mode: 0600
   notify:
   - restart node
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 7a11a10fa..df3e0a44a 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -11,7 +11,7 @@ PartOf=docker.service
 [Service]
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
 ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system {{ openshift.node.node_image }}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log {{ openshift.node.node_image }}
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 Restart=always
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index 2455fc792..e431e978c 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -23,7 +23,7 @@
     --config={{ mktemp.stdout }}/admin.kubeconfig
   register: pv_create_output
   when: persistent_volumes | length > 0
-  failed_when: ('already exists' not in pv_create_output.stderr if pv_create_output.stderr else False) or ('created' not in pv_create_output.stdout if pv_create_output.stdout else False)
+  failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
   changed_when: ('created' in pv_create_output.stdout)
 
 - name: Deploy PersistentVolumeClaim definitions
@@ -40,7 +40,7 @@
     --config={{ mktemp.stdout }}/admin.kubeconfig
   register: pvc_create_output
   when: persistent_volume_claims | length > 0
-  failed_when: ('already exists' not in pvc_create_output.stderr if pvc_create_output.stderr else False) or ('created' not in pvc_create_output.stdout if pvc_create_output.stdout else False)
+  failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
   changed_when: ('created' in pvc_create_output.stdout)
 
 - name: Delete temp directory
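The reworked `failed_when` above ("Fix pv/c creation failed_when.") keeps the PV/PVC creation tasks idempotent: the task now fails only when `oc create` reports neither a successful create nor an already existing object. A rough Python rendering of the new expression, with invented command output strings for illustration:

```python
def create_failed(stdout, stderr):
    # Mirror of the new failed_when: fail only if the output shows neither
    # "created" (fresh object) nor "already exists" (object left from a prior run).
    return ('already exists' not in stderr) and ('created' not in stdout)

# Illustrative outputs, not captured from a real cluster:
assert not create_failed('persistentvolume "pv01" created', '')                  # first run
assert not create_failed('', 'Error: persistentvolumes "pv01" already exists')   # re-run
assert create_failed('', 'error validating "pv01": spec is invalid')             # real failure
```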
diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
new file mode 100644
index 000000000..1efab9466
--- /dev/null
+++ b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
@@ -0,0 +1,37 @@
+####
+#
+# OSE 3.0.z did not have 'oadm policy add-scc-to-user'.
+#
+####
+
+- name: tmp dir for openshift
+  file:
+    path: /tmp/openshift
+    state: directory
+    owner: root
+    mode: 700
+
+- name: Create service account configs
+  template:
+    src: serviceaccount.j2
+    dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml"
+  with_items: openshift_serviceaccounts_names
+
+- name: Get current security context constraints
+  shell: >
+    {{ openshift.common.client_binary }} get scc privileged -o yaml
+    --output-version=v1 > /tmp/openshift/scc.yaml
+  changed_when: false
+
+- name: Add security context constraint for {{ item }}
+  lineinfile:
+    dest: /tmp/openshift/scc.yaml
+    line: "- system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}"
+    insertafter: "^users:$"
+  when: "item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}"
+  with_nested:
+    - openshift_serviceaccounts_names
+    - scc_test.results
+
+- name: Apply new scc rules for service accounts
+  command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1"
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index 89d9e3aa7..f34fa7b74 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -32,42 +32,5 @@
     - openshift_serviceaccounts_names
     - scc_test.results
 
-####
-#
-# Support for 3.0.z
-#
-####
-
-- name: tmp dir for openshift
-  file:
-    path: /tmp/openshift
-    state: directory
-    owner: root
-    mode: 700
-  when: not openshift.common.version_gte_3_1_or_1_1
-
-- name: Create service account configs
-  template:
-    src: serviceaccount.j2
-    dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml"
-  with_items: openshift_serviceaccounts_names
-  when: not openshift.common.version_gte_3_1_or_1_1
-
-- name: Get current security context constraints
-  shell: >
-    {{ openshift.common.client_binary }} get scc privileged -o yaml
-    --output-version=v1 > /tmp/openshift/scc.yaml
-  changed_when: false
-  when: not openshift.common.version_gte_3_1_or_1_1
-
-- name: Add security context constraint for {{ item }}
-  lineinfile:
-    dest: /tmp/openshift/scc.yaml
-    line: "- system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item }}"
-    insertafter: "^users:$"
-  with_items: openshift_serviceaccounts_names
-  when: not openshift.common.version_gte_3_1_or_1_1
-
-- name: Apply new scc rules for service accounts
-  command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1"
+- include: legacy_add_scc_to_user.yml
   when: not openshift.common.version_gte_3_1_or_1_1
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 1824d7881..e36f23a2b 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -7,12 +7,6 @@ g_template_openshift_master:
     - Openshift Master
     key: openshift.master.app.create
 
-  - key: openshift.master.registry.healthy_pct
-    description: "Shows the percentage of healthy registries in the cluster"
-    type: int
-    applications:
-    - Openshift Master
-
   - key: openshift.master.process.count
     description: Shows number of master processes running
     type: int
@@ -278,11 +272,6 @@ g_template_openshift_master:
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
     priority: high
 
-  - name: 'Low number of etcd watchers on {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
-    priority: avg
-
   - name: 'Etcd ping failed on {HOST.NAME}'
     expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
@@ -345,20 +334,6 @@ g_template_openshift_master:
     - 'Openshift Master process not running on {HOST.NAME}'
     priority: avg
 
-  - name: 'One or more Docker Registries is unhealthy according to {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.registry.healthy_pct.last(#2)}<100 and {Template Openshift Master:openshift.master.registry.healthy_pct.max(#2)}>50'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc'
-    dependencies:
-    - 'Openshift Master process not running on {HOST.NAME}'
-    priority: avg
-
-  - name: 'Multiple Docker Registries are unhealthy according to {HOST.NAME}'
-    expression: '{Template Openshift Master:openshift.master.registry.healthy_pct.last(#2)}<51'
-    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc'
-    dependencies:
-    - 'Openshift Master process not running on {HOST.NAME}'
-    priority: high
-
   - name: 'SkyDNS port not listening on {HOST.NAME}'
     expression: '{Template Openshift Master:openshift.master.skydns.port.open.max(#3)}<1'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
diff --git a/roles/os_zabbix/vars/template_openshift_node.yml b/roles/os_zabbix/vars/template_openshift_node.yml
index b0488656d..c36c593df 100644
--- a/roles/os_zabbix/vars/template_openshift_node.yml
+++ b/roles/os_zabbix/vars/template_openshift_node.yml
@@ -26,7 +26,29 @@ g_template_openshift_node:
     applications:
     - Openshift Node
 
+  - key: openshift.node.registry-pods.healthy_pct
+    description: Shows the percentage of healthy registries in the cluster
+    type: int
+    applications:
+    - Openshift Node
+
+  - key: openshift.node.registry.service.ping
+    description: Ping docker-registry service from node
+    type: int
+    applications:
+    - Openshift Node
+
   ztriggers:
+  - name: 'One or more Docker Registries is unhealthy according to {HOST.NAME}'
+    expression: '{Template Openshift Node:openshift.node.registry-pods.healthy_pct.last(#2)}<100 and {Template Openshift Node:openshift.node.registry-pods.healthy_pct.last(#1)}<100'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc'
+    priority: avg
+
+  - name: 'Docker Registry service is unhealthy according to {HOST.NAME}'
+    expression: '{Template Openshift Node:openshift.node.registry.service.ping.last(#2)}<1 and {Template Openshift Node:openshift.node.registry.service.ping.last(#1)}<1'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc'
+    priority: avg
+
   - name: 'Openshift Node process not running on {HOST.NAME}'
     expression: '{Template Openshift Node:openshift.node.process.count.max(#3)}<1'
     url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
index 453a9a3b4..d85d8b94e 100644
--- a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
+++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
@@ -57,11 +57,13 @@ ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }}
   -v /var/lib/docker:/var/lib/docker:ro \
   -v /var/run/docker.sock:/var/run/docker.sock \
   -v /var/run/openvswitch:/var/run/openvswitch \
-{% if hostvars[inventory_hostname]['ec2_tag_host-type'] == 'master' %}
+{% if hostvars[inventory_hostname]['oo_hosttype'] == 'master' %}
   -v /etc/origin/master/admin.kubeconfig:/etc/origin/master/admin.kubeconfig \
   -v /etc/origin/master/master.etcd-client.crt:/etc/origin/master/master.etcd-client.crt \
   -v /etc/origin/master/master.etcd-client.key:/etc/origin/master/master.etcd-client.key \
   -v /etc/origin/master/master-config.yaml:/etc/origin/master/master-config.yaml \
+{% elif hostvars[inventory_hostname]['oo_hosttype'] == 'node' %}
+  -v /etc/origin/node:/etc/origin/node \
 {% endif %}
   {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 9839bf447..f09f90288 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -138,9 +138,10 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
         host_props['containerized'] = False
         if oo_cfg.settings['variant_version'] != '3.0':
-            rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
-                                            type=click.Choice(['rpm', 'container']),
-                                            default='rpm')
+            rpm_or_container = \
+                click.prompt('Will this host be RPM or Container based (rpm/container)?',
+                             type=click.Choice(['rpm', 'container']),
+                             default='rpm')
             if rpm_or_container == 'container':
                 host_props['containerized'] = True
 
@@ -281,7 +282,8 @@ hostname.
         host_props['connect_to'] = click.prompt('Enter hostname or IP address',
                                                 value_proc=validate_prompt_lb)
-        install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
+        install_haproxy = \
+            click.confirm('Should the reference haproxy load balancer be installed on this host?')
         host_props['preconfigured'] = not install_haproxy
         host_props['master'] = False
         host_props['node'] = False
@@ -375,7 +377,8 @@ def check_hosts_config(oo_cfg, unattended):
             sys.exit(1)
     elif len(master_lb) == 1:
         if master_lb[0].master or master_lb[0].node:
-            click.echo('ERROR: The Master load balancer is configured as a master or node. Please correct this.')
+            click.echo('ERROR: The Master load balancer is configured as a master or node. ' \
+                'Please correct this.')
             sys.exit(1)
     else:
         message = """
@@ -525,18 +528,27 @@ Add new nodes here
 def get_installed_hosts(hosts, callback_facts):
     installed_hosts = []
+
+    # count nativeha lb as an installed host
+    try:
+        first_master = next(host for host in hosts if host.master)
+        lb_hostname = callback_facts[first_master.connect_to]['master'].get('cluster_hostname', '')
+        lb_host = next(host for host in hosts if host.connect_to == lb_hostname)
+        installed_hosts.append(lb_host)
+    except KeyError:
+        pass
+
     for host in hosts:
-        if host.connect_to in callback_facts.keys() and (
-            ('common' in callback_facts[host.connect_to].keys() and
-             callback_facts[host.connect_to]['common'].get('version', '') and
-             callback_facts[host.connect_to]['common'].get('version', '') != 'None') \
-            or
-            ('master' in callback_facts[host.connect_to].keys() and
-             callback_facts[host.connect_to]['master'].get('cluster_method', '') == 'native')
-            ):
+        if host.connect_to in callback_facts.keys() and is_installed_host(host, callback_facts):
             installed_hosts.append(host)
     return installed_hosts
 
+def is_installed_host(host, callback_facts):
+    return 'common' in callback_facts[host.connect_to].keys() and \
+           callback_facts[host.connect_to]['common'].get('version', '') and \
+           callback_facts[host.connect_to]['common'].get('version', '') != 'None'
+
 # pylint: disable=too-many-branches
 # This pylint error will be corrected shortly in separate PR.
 def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
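With the refactor above ("a-o-i: Redo logic for detecting master_lb"), `get_installed_hosts()` counts a host as installed only when fact gathering reported a usable `common.version`, and it additionally counts the native-HA load balancer whose name matches the first master's `cluster_hostname`. A small sketch of the version test, using an invented `callback_facts` structure (hostnames and versions are placeholders):

```python
# Hypothetical facts keyed by connect_to, shaped like the installer's callback data;
# only the keys inspected by the new is_installed_host() are shown.
callback_facts = {
    'master1.example.com': {'common': {'version': '3.1.1.6'}},  # something is installed
    'newnode1.example.com': {'common': {'version': 'None'}},    # facts only, nothing installed
}

def looks_installed(connect_to):
    # Same test as is_installed_host(): a version string must be present
    # and must not be the literal string 'None'.
    common = callback_facts.get(connect_to, {}).get('common', {})
    version = common.get('version', '')
    return bool(version) and version != 'None'

print(looks_installed('master1.example.com'))   # True
print(looks_installed('newnode1.example.com'))  # False
```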
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 4861b1b1b..b1af21773 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -147,7 +147,8 @@ class OOConfig(object):
             raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename, ferr.strerror))
         except yaml.scanner.ScannerError:
-            raise OOConfigFileError('Config file "{}" is not a valid YAML document'.format(self.config_path))
+            raise OOConfigFileError(
+                'Config file "{}" is not a valid YAML document'.format(self.config_path))
 
     def _upgrade_legacy_config(self):
         new_hosts = []
@@ -180,7 +181,8 @@ class OOConfig(object):
         if not os.path.exists(self.settings['ansible_inventory_directory']):
             os.makedirs(self.settings['ansible_inventory_directory'])
         if 'ansible_plugins_directory' not in self.settings:
-            self.settings['ansible_plugins_directory'] = resource_filename(__name__, 'ansible_plugins')
+            self.settings['ansible_plugins_directory'] = \
+                resource_filename(__name__, 'ansible_plugins')
         if 'version' not in self.settings:
             self.settings['version'] = 'v1'
 
@@ -191,7 +193,8 @@ class OOConfig(object):
         if 'ansible_ssh_user' not in self.settings:
             self.settings['ansible_ssh_user'] = ''
 
-        self.settings['ansible_inventory_path'] = '{}/hosts'.format(self.settings['ansible_inventory_directory'])
+        self.settings['ansible_inventory_path'] = \
+            '{}/hosts'.format(self.settings['ansible_inventory_directory'])
 
         # clean up any empty sets
         for setting in self.settings.keys():
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index ec49c9601..64875dbe9 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -113,9 +113,11 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy):
     if multiple_masters and proxy is not None:
         base_inventory.write('openshift_master_cluster_method=native\n')
         base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
-        base_inventory.write("openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
+        base_inventory.write(
+            "openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
     if CFG.settings.get('master_routingconfig_subdomain', False):
-        base_inventory.write("osm_default_subdomain={}\n".format(CFG.settings['master_routingconfig_subdomain']))
+        base_inventory.write(
+            "osm_default_subdomain={}\n".format(CFG.settings['master_routingconfig_subdomain']))
 
@@ -247,7 +249,8 @@ def run_upgrade_playbook(old_version, new_version, verbose=False):
                                 'playbooks/byo/openshift-cluster/upgrades/v{}_minor/upgrade.yml'.format(new_version))
     else:
         playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
-                                'playbooks/byo/openshift-cluster/upgrades/v{}_to_v{}/upgrade.yml'.format(old_version, new_version))
+                                'playbooks/byo/openshift-cluster/upgrades/v{}_to_v{}/upgrade.yml'.format(old_version,
+                                                                                                         new_version))
     # TODO: Upgrade inventory for upgrade?
     inventory_file = generate_inventory(CFG.hosts)
     facts_env = os.environ.copy()