55 files changed, 653 insertions, 921 deletions
diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..001bfdc39 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,15 @@ +sudo: false + +language: python +python: + - "2.7" + +install: + - pip install -r requirements.txt + +script: + # TODO(rhcarvalho): check syntax of other important entrypoint playbooks + - ansible-playbook --syntax-check playbooks/byo/config.yml + # TODO(rhcarvalho): update make ci to pick up these tests + - nosetests --tests=test + - cd utils && make ci @@ -1,4 +1,5 @@ [![Join the chat at https://gitter.im/openshift/openshift-ansible](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/openshift/openshift-ansible) +[![Build Status](https://travis-ci.org/openshift/openshift-ansible.svg?branch=master)](https://travis-ci.org/openshift/openshift-ansible) # OpenShift Ansible diff --git a/README_CONTAINERIZED_INSTALLATION.md b/README_CONTAINERIZED_INSTALLATION.md index c615154ef..0a0ebb836 100644 --- a/README_CONTAINERIZED_INSTALLATION.md +++ b/README_CONTAINERIZED_INSTALLATION.md @@ -31,7 +31,7 @@ native clients. The wrapper scripts mount a limited subset of paths, _~/.kube_, _/etc/origin/_, and _/tmp_. Be mindful of this when passing in files to be processed by `oc` or `oadm`. You may find it easier to redirect input like this : - + `oc create -f - < my_file.json` ## Technical Notes @@ -48,18 +48,18 @@ before attempting to pull any of the following images. openshift/origin openshift/node (node + openshift-sdn + openvswitch rpm for client tools) openshift/openvswitch (centos7 + openvswitch rpm, runs ovsdb ovsctl processes) - registry.access.redhat.com/rhel7/etcd + registry.access.redhat.com/rhel7/etcd3 OpenShift Enterprise openshift3/ose openshift3/node openshift3/openvswitch - registry.access.redhat.com/rhel7/etcd + registry.access.redhat.com/rhel7/etcd3 Atomic Enterprise Platform aep3/aep aep3/node aep3/openvswitch - registry.access.redhat.com/rhel7/etcd - + registry.access.redhat.com/rhel7/etcd3 + * note openshift3/* and aep3/* images come from registry.access.redhat.com and rely on the --additional-repository flag being set appropriately. diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 93fdd5ae4..38bc3ad6b 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -608,8 +608,8 @@ class FilterModule(object): host_type=_get_tag_value(host['group_names'], 'host-type'), sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'), host={'name': host['inventory_hostname'], - 'public IP': host['ansible_ssh_host'], - 'private IP': host['ansible_default_ipv4']['address']}) + 'public IP': host['oo_public_ipv4'], + 'private IP': host['oo_private_ipv4']}) except KeyError: pass return clusters @@ -889,11 +889,32 @@ class FilterModule(object): 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)}) return loadbalancer_backends + @staticmethod + def oo_chomp_commit_offset(version): + """Chomp any "+git.foo" commit offset string from the given `version` + and return the modified version string. + + Ex: + - chomp_commit_offset(None) => None + - chomp_commit_offset(1337) => "1337" + - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15" + - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15" + - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0" + """ + if version is None: + return version + else: + # Stringify, just in case it's a Number type. Split by '+' and + # return the first split. 
No concerns about strings without a + # '+': .split() returns a list containing the original string. + return str(version).split('+')[0] + def filters(self): """ returns a mapping of filters to methods """ return { "oo_select_keys": self.oo_select_keys, "oo_select_keys_from_list": self.oo_select_keys_from_list, + "oo_chomp_commit_offset": self.oo_chomp_commit_offset, "oo_collect": self.oo_collect, "oo_flatten": self.oo_flatten, "oo_pdb": self.oo_pdb, diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 9ed1542cf..4961d23ef 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -13,7 +13,7 @@ URL: https://github.com/openshift/openshift-ansible Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz BuildArch: noarch -Requires: ansible >= 2.1.0.0 +Requires: ansible >= 2.2.0.0-1 Requires: python2 Requires: openshift-ansible-docs = %{version}-%{release} diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 789f66b14..4ea639cbe 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -338,6 +338,7 @@ failed_when: False with_items: - etcd + - etcd3 - firewalld - name: Stop additional atomic services @@ -352,6 +353,7 @@ when: not is_atomic | bool with_items: - etcd + - etcd3 - shell: systemctl reset-failed changed_when: False @@ -365,6 +367,7 @@ - /etc/ansible/facts.d/openshift.fact - /etc/etcd - /etc/systemd/system/etcd_container.service + - /etc/profile.d/etcdctl.sh # Intentionally using the rm command over the file module because if someone had mounted a filesystem # at /var/lib/etcd then the contents were not removed correctly diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index 4934ae6d0..ed8aac398 100644 --- a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -16,11 +16,8 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" + oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}" + oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}" with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - -- name: List Hosts - hosts: oo_list_hosts - gather_facts: no - tasks: - debug: - msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}" + msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml index 381e3ed8f..834461e14 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -1,6 +1,6 @@ - name: Check for appropriate Docker versions - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config roles: - openshift_facts tasks: @@ -19,19 +19,19 @@ # don't want to carry on, potentially taking out every node. The playbook can safely be re-run # and will not take any action on a node already running the requested docker version.
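A minimal standalone sketch of the oo_chomp_commit_offset filter added to oo_filters.py above, in plain Python outside Ansible; it mirrors the docstring examples:

    def chomp_commit_offset(version):
        # Drop any "+git.foo" commit offset from a version string.
        if version is None:
            return None
        # str() handles numeric input; split('+') on a string with no '+'
        # returns a one-element list holding the original string.
        return str(version).split('+')[0]

    assert chomp_commit_offset(None) is None
    assert chomp_commit_offset(1337) == "1337"
    assert chomp_commit_offset("v3.4.0.15+git.derp") == "v3.4.0.15"
    assert chomp_commit_offset("v1.3.0+52492b4") == "v1.3.0"

Once registered in filters(), it can be applied in a template as e.g. {{ some_version | oo_chomp_commit_offset }} (variable name illustrative).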
- name: Evacuate and upgrade nodes - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config serial: 1 any_errors_fatal: true tasks: - name: Prepare for Node evacuation command: > - {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=false + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade - name: Evacuate Node for Kubelet upgrade command: > - {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --evacuate --force + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade @@ -40,7 +40,7 @@ - name: Set node schedulability command: > - {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=true + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: openshift.node.schedulable | bool when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml new file mode 100644 index 000000000..c25f96212 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml @@ -0,0 +1,26 @@ +--- +- include: ../../../common/openshift-cluster/verify_ansible_version.yml + +- name: Create initial host groups for localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tags: + - always + tasks: + - include_vars: ../cluster_hosts.yml + - add_host: + name: "{{ item }}" + groups: l_oo_all_hosts + with_items: "{{ g_all_hosts | default([]) }}" + +- name: Create initial host groups for all hosts + hosts: l_oo_all_hosts + gather_facts: no + tags: + - always + tasks: + - include_vars: ../cluster_hosts.yml + +- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml index 74147fe01..5f008a045 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml @@ -212,7 +212,7 @@ - name: Determine if node is currently scheduleable command: > {{ openshift.common.client_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig - get node {{ openshift.common.hostname | lower }} -o json + get node {{ openshift.node.nodename }} -o json register: node_output when: openshift_certificates_redeploy_ca | default(false) | bool delegate_to: "{{ groups.oo_first_master.0 }}" @@ -225,7 +225,7 @@ - name: Prepare for node evacuation command: > {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig - manage-node {{ openshift.common.hostname | lower }} + manage-node {{ openshift.node.nodename }} --schedulable=false delegate_to: "{{ 
groups.oo_first_master.0 }}" when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool @@ -233,7 +233,7 @@ - name: Evacuate node command: > {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig - manage-node {{ openshift.common.hostname | lower }} + manage-node {{ openshift.node.nodename }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool @@ -241,7 +241,7 @@ - name: Set node schedulability command: > {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig - manage-node {{ openshift.common.hostname | lower }} --schedulable=true + manage-node {{ openshift.node.nodename }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml new file mode 100644 index 000000000..57b156b1c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -0,0 +1,73 @@ +- name: Backup etcd + hosts: etcd_hosts_to_backup + vars: + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + roles: + - openshift_facts + tasks: + # Ensure we persist the etcd role for this host in openshift_facts + - openshift_facts: + role: etcd + local_facts: {} + when: "'etcd' not in openshift" + + - stat: path=/var/lib/openshift + register: var_lib_openshift + + - stat: path=/var/lib/origin + register: var_lib_origin + + - name: Create origin symlink if necessary + file: src=/var/lib/openshift/ dest=/var/lib/origin state=link + when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False + + # TODO: replace shell module with command and update later checks + # We assume to be using the data dir for all backups. + - name: Check available disk space for etcd backup + shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 + register: avail_disk + + # TODO: replace shell module with command and update later checks + - name: Check current embedded etcd disk usage + shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 + register: etcd_disk_usage + when: embedded_etcd | bool + + - name: Abort if insufficient disk space for etcd backup + fail: + msg: > + {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, + {{ avail_disk.stdout }} Kb available. 
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) + + - name: Install etcd (for etcdctl) + action: "{{ ansible_pkg_mgr }} name=etcd state=present" + when: not openshift.common.is_atomic | bool + + - name: Generate etcd backup + command: > + etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} + --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }} + + - set_fact: + etcd_backup_complete: True + + - name: Display location of etcd backup + debug: + msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}" + +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.etcd_hosts_to_backup) + | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml new file mode 100644 index 000000000..35f391f8c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml @@ -0,0 +1,47 @@ +--- +- name: Verify cluster is healthy pre-upgrade + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + +- name: Get current image + shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}' + register: current_image + +- name: Set new_etcd_image + set_fact: + new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=') + else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}" + +- name: Pull new etcd image + command: "docker pull {{ new_etcd_image }}" + +- name: Update to latest etcd image + replace: + dest: /etc/systemd/system/etcd_container.service + regexp: "{{ current_image.stdout }}$" + replace: "{{ new_etcd_image }}" + +- name: Restart etcd_container + systemd: + name: etcd_container + daemon_reload: yes + state: restarted + +## TODO: probably should just move this into the backup playbooks, also this +## will fail on atomic host. We need to revisit how to do etcd backups there as +## the container may be newer than etcdctl on the host. 
Assumes etcd3 obsoletes etcd (7.3.1) +- name: Upgrade etcd for etcdctl when not atomic + action: "{{ ansible_pkg_mgr }} name=etcd ensure=latest" + when: not openshift.common.is_atomic | bool + +- name: Verify cluster is healthy + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + register: etcdctl + until: etcdctl.rc == 0 + retries: 3 + delay: 10 + +- name: Store new etcd_image + openshift_facts: + role: etcd + local_facts: + etcd_image: "{{ new_etcd_image }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml new file mode 100644 index 000000000..30232110e --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml @@ -0,0 +1,23 @@ +--- +# F23 GA'd with etcd 2.0, currently has 2.2 in updates +# F24 GA'd with etcd-2.2, currently has 2.2 in updates +# F25 Beta currently has etcd 3.0 +- name: Verify cluster is healthy pre-upgrade + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + +- name: Update etcd + package: + name: "etcd" + state: "latest" + +- name: Restart etcd + service: + name: etcd + state: restarted + +- name: Verify cluster is healthy + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + register: etcdctl + until: etcdctl.rc == 0 + retries: 3 + delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh new file mode 120000 index 000000000..641e04e44 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh @@ -0,0 +1 @@ +../roles/etcd/files/etcdctl.sh
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins new file mode 120000 index 000000000..27ddaa18b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins new file mode 120000 index 000000000..cf407f69b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins @@ -0,0 +1 @@ +../../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml new file mode 100644 index 000000000..cce844403 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -0,0 +1,122 @@ +--- +# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to +# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedious +# task for RHEL and CentOS it's simply not possible in Fedora unless you've +# mirrored packages on your own because only the GA and latest versions are +# available in the repos. So for Fedora we'll simply skip this, sorry. + +- include: ../../evaluate_groups.yml + tags: + - always + +- name: Evaluate additional groups for upgrade + hosts: localhost + connection: local + become: no + tasks: + - name: Evaluate etcd_hosts_to_upgrade + add_host: + name: "{{ item }}" + groups: etcd_hosts_to_upgrade, etcd_hosts_to_backup + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" + +- name: Backup etcd before upgrading anything + include: backup.yml + vars: + backup_tag: "pre-upgrade-" + +- name: Drop etcdctl profiles + hosts: etcd_hosts_to_upgrade + tasks: + - include: roles/etcd/tasks/etcdctl.yml + +- name: Determine etcd version + hosts: etcd_hosts_to_upgrade + tasks: + - name: Record RPM based etcd version + command: rpm -qa --qf '%{version}' etcd\* + register: etcd_installed_version + failed_when: false + when: not openshift.common.is_containerized | bool + - name: Record containerized etcd version + command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* + register: etcd_installed_version + failed_when: false + when: openshift.common.is_containerized | bool + +# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop +# through hosts, then loop through tasks only when appropriate +- name: Upgrade to 2.1 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '2.1' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade RPM hosts to 2.2 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '2.2' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade containerized hosts to 2.2.5 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: 2.2.5 + tasks: + - include: containerized_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool + +- name: Upgrade RPM hosts to 2.3 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '2.3' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade containerized hosts to 2.3.7 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: 2.3.7 + tasks: + - include: containerized_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool + +- name:
Upgrade RPM hosts to 3.0 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '3.0' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade containerized hosts to etcd3 image + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: 3.0.3 + tasks: + - include: containerized_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool + +- name: Upgrade fedora to latest + hosts: etcd_hosts_to_upgrade + serial: 1 + tasks: + - include: fedora_tasks.yml + when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool + +- name: Backup etcd + include: backup.yml + vars: + backup_tag: "post-3.0-" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml new file mode 100644 index 000000000..8e7dc9d9b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml @@ -0,0 +1,23 @@ +--- +- name: Verify cluster is healthy pre-upgrade + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + +- name: Update etcd package but exclude etcd3 + command: "{{ ansible_pkg_mgr }} install -y etcd-{{ upgrade_version }}\\* --exclude etcd3" + when: upgrade_version | version_compare('3.0','<') + +- name: Update etcd package not excluding etcd3 + command: "{{ ansible_pkg_mgr }} install -y etcd3-{{ upgrade_version }}\\*" + when: not upgrade_version | version_compare('3.0','<') + +- name: Restart etcd + service: + name: etcd + state: restarted + +- name: Verify cluster is healthy + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + register: etcdctl + until: etcdctl.rc == 0 + retries: 3 + delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 927d9b4ca..57c25aa41 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -13,80 +13,22 @@ groups: etcd_hosts_to_backup with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" -- name: Backup etcd - hosts: etcd_hosts_to_backup - vars: - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" - timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" +# If facts cache were for some reason deleted, this fact may not be set, and if not set +# it will always default to true. This causes problems for the etcd data dir fact detection +# so we must first make sure this is set correctly before attempting the backup. +- name: Set master embedded_etcd fact + hosts: oo_masters_to_config roles: - openshift_facts tasks: - # Ensure we persist the etcd role for this host in openshift_facts - openshift_facts: - role: etcd - local_facts: {} - when: "'etcd' not in openshift" - - - stat: path=/var/lib/openshift - register: var_lib_openshift - - - stat: path=/var/lib/origin - register: var_lib_origin - - - name: Create origin symlink if necessary - file: src=/var/lib/openshift/ dest=/var/lib/origin state=link - when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False - - # TODO: replace shell module with command and update later checks - # We assume to be using the data dir for all backups. - - name: Check available disk space for etcd backup - shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 - register: avail_disk - - # TODO: replace shell module with command and update later checks - - name: Check current embedded etcd disk usage - shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 - register: etcd_disk_usage - when: embedded_etcd | bool - - - name: Abort if insufficient disk space for etcd backup - fail: - msg: > - {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, - {{ avail_disk.stdout }} Kb available. 
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) - - - name: Install etcd (for etcdctl) - action: "{{ ansible_pkg_mgr }} name=etcd state=latest" - when: not openshift.common.is_atomic | bool - - - name: Generate etcd backup - command: > - etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} - --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }} - - - set_fact: - etcd_backup_complete: True - - - name: Display location of etcd backup - debug: - msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}" - + role: master + local_facts: + embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}" -- name: Gate on etcd backup - hosts: localhost - connection: local - become: no - tasks: - - set_fact: - etcd_backup_completed: "{{ hostvars - | oo_select_keys(groups.etcd_hosts_to_backup) - | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" - - set_fact: - etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - - fail: - msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" - when: etcd_backup_failed | length > 0 +- name: Backup etcd + include: ./etcd/backup.yml - name: Upgrade master packages hosts: oo_masters_to_config @@ -215,6 +157,12 @@ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool run_once: true + - name: Reconcile Jenkins Pipeline Role Bindings + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm + run_once: true + when: openshift.common.version_gte_3_4_or_1_4 | bool + - name: Reconcile Security Context Constraints command: > {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index e66344f99..1f314c854 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -17,7 +17,7 @@ # we merge upgrade functionality into the base roles and a normal config.yml playbook run. 
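The "Gate on etcd backup" play that upgrade_control_plane.yml drops here (now centralized in backup.yml above) reduces to a set difference over hostvars; a rough standalone sketch of that logic in plain Python, with made-up host names:

    def gate_on_etcd_backup(backup_hosts, hostvars):
        # Collect hosts that set the etcd_backup_complete fact
        # (oo_select_keys + oo_collect in the playbook)...
        completed = [h for h in backup_hosts
                     if hostvars.get(h, {}).get('etcd_backup_complete')]
        # ...and abort if any host in the group is missing from that list
        # (the difference + fail tasks).
        failed = [h for h in backup_hosts if h not in completed]
        if failed:
            raise SystemExit("Upgrade cannot continue. The following hosts "
                             "did not complete etcd backup: " + ",".join(failed))

    gate_on_etcd_backup(
        ["etcd1.example.com", "etcd2.example.com"],
        {"etcd1.example.com": {"etcd_backup_complete": True},
         "etcd2.example.com": {"etcd_backup_complete": True}})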
- name: Determine if node is currently scheduleable command: > - {{ openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} -o json + {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json register: node_output delegate_to: "{{ groups.oo_first_master.0 }}" changed_when: false @@ -29,7 +29,7 @@ - name: Mark unschedulable if host is a node command: > - {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=false + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_upgrade # NOTE: There is a transient "object has been modified" error here, allow a couple @@ -41,7 +41,7 @@ - name: Evacuate Node for Kubelet upgrade command: > - {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --evacuate --force + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_upgrade tasks: @@ -64,10 +64,12 @@ - name: Set node schedulability command: > - {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=true + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool register: node_sched until: node_sched.rc == 0 retries: 3 delay: 1 + + diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index a53c55c14..5fcb850a2 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -53,7 +53,7 @@ when: openshift_hosted_metrics_deployer_prefix is not defined - set_fact: openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}" - when: openshift_hosted_metrics_deployer_prefix is not defined + when: openshift_hosted_metrics_deployer_version is not defined roles: - openshift_facts post_tasks: diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py deleted file mode 100644 index fcaa3b850..000000000 --- a/playbooks/gce/openshift-cluster/library/gce.py +++ /dev/null @@ -1,543 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: gce -version_added: "1.4" -short_description: create or terminate GCE instances -description: - - Creates or terminates Google Compute Engine (GCE) instances. 
See - U(https://cloud.google.com/products/compute-engine) for an overview. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. -options: - image: - description: - - image string to use for the instance - required: false - default: "debian-7" - instance_names: - description: - - a comma-separated list of instance names to create or destroy - required: false - default: null - machine_type: - description: - - machine type to use for the instance, use 'n1-standard-1' by default - required: false - default: "n1-standard-1" - metadata: - description: - - a hash/dictionary of custom data for the instance; - '{"key":"value", ...}' - required: false - default: null - service_account_email: - version_added: "1.5.1" - description: - - service account email - required: false - default: null - service_account_permissions: - version_added: "2.0" - description: - - service account permissions (see - U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), - --scopes section for detailed information) - required: false - default: null - choices: [ - "bigquery", "cloud-platform", "compute-ro", "compute-rw", - "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", - "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", - "storage-rw", "taskqueue", "userinfo-email" - ] - pem_file: - version_added: "1.5.1" - description: - - path to the pem file associated with the service account email - required: false - default: null - project_id: - version_added: "1.5.1" - description: - - your GCE project ID - required: false - default: null - name: - description: - - identifier when working with a single instance - required: false - network: - description: - - name of the network, 'default' will be used if not specified - required: false - default: "default" - persistent_boot_disk: - description: - - if set, create the instance with a persistent boot disk - required: false - default: "false" - disks: - description: - - a list of persistent disks to attach to the instance; a string value - gives the name of the disk; alternatively, a dictionary value can - define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry - will be the boot disk (which must be READ_WRITE). - required: false - default: null - version_added: "1.7" - state: - description: - - desired state of the resource - required: false - default: "present" - choices: ["active", "present", "absent", "deleted"] - tags: - description: - - a comma-separated list of tags to associate with the instance - required: false - default: null - zone: - description: - - the GCE zone to use - required: true - default: "us-central1-a" - ip_forward: - version_added: "1.9" - description: - - set to true if the instance can forward ip packets (useful for - gateways) - required: false - default: "false" - external_ip: - version_added: "1.9" - description: - - type of external ip, ephemeral by default - required: false - default: "ephemeral" - disk_auto_delete: - version_added: "1.9" - description: - - if set boot disk will be removed after instance destruction - required: false - default: "true" - -requirements: - - "python >= 2.6" - - "apache-libcloud >= 0.13.3" -notes: - - Either I(name) or I(instance_names) is required. -author: "Eric Johnson (@erjohnso) <erjohnso@google.com>" -''' - -EXAMPLES = ''' -# Basic provisioning example. Create a single Debian 7 instance in the -# us-central1-a Zone of n1-standard-1 machine type. 
-- local_action: - module: gce - name: test-instance - zone: us-central1-a - machine_type: n1-standard-1 - image: debian-7 - -# Example using defaults and with metadata to create a single 'foo' instance -- local_action: - module: gce - name: foo - metadata: '{"db":"postgres", "group":"qa", "id":500}' - - -# Launch instances from a control node, runs some tasks on the new instances, -# and then terminate them -- name: Create a sandbox instance - hosts: localhost - vars: - names: foo,bar - machine_type: n1-standard-1 - image: debian-6 - zone: us-central1-a - service_account_email: unique-email@developer.gserviceaccount.com - pem_file: /path/to/pem_file - project_id: project-id - tasks: - - name: Launch instances - local_action: gce instance_names={{names}} machine_type={{machine_type}} - image={{image}} zone={{zone}} - service_account_email={{ service_account_email }} - pem_file={{ pem_file }} project_id={{ project_id }} - register: gce - - name: Wait for SSH to come up - local_action: wait_for host={{item.public_ip}} port=22 delay=10 - timeout=60 state=started - with_items: {{gce.instance_data}} - -- name: Configure instance(s) - hosts: launched - sudo: True - roles: - - my_awesome_role - - my_awesome_tasks - -- name: Terminate instances - hosts: localhost - connection: local - tasks: - - name: Terminate instances that were previously launched - local_action: - module: gce - state: 'absent' - instance_names: {{gce.instance_names}} - -''' - -try: - import libcloud - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceInUseError, ResourceNotFoundError - _ = Provider.GCE - HAS_LIBCLOUD = True -except ImportError: - HAS_LIBCLOUD = False - -try: - from ast import literal_eval - HAS_PYTHON26 = True -except ImportError: - HAS_PYTHON26 = False - - -def get_instance_info(inst): - """Retrieves instance information from an instance object and returns it - as a dictionary. - - """ - metadata = {} - if 'metadata' in inst.extra and 'items' in inst.extra['metadata']: - for md in inst.extra['metadata']['items']: - metadata[md['key']] = md['value'] - - try: - netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] - except: - netname = None - if 'disks' in inst.extra: - disk_names = [disk_info['source'].split('/')[-1] - for disk_info - in sorted(inst.extra['disks'], - key=lambda disk_info: disk_info['index'])] - else: - disk_names = [] - - if len(inst.public_ips) == 0: - public_ip = None - else: - public_ip = inst.public_ips[0] - - return({ - 'image': inst.image is not None and inst.image.split('/')[-1] or None, - 'disks': disk_names, - 'machine_type': inst.size, - 'metadata': metadata, - 'name': inst.name, - 'network': netname, - 'private_ip': inst.private_ips[0], - 'public_ip': public_ip, - 'status': ('status' in inst.extra) and inst.extra['status'] or None, - 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], - 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, - }) - - -def create_instances(module, gce, instance_names): - """Creates new instances. Attributes other than instance_names are picked - up from 'module' - - module : AnsibleModule object - gce: authenticated GCE libcloud driver - instance_names: python list of instance names to create - - Returns: - A list of dictionaries with instance information - about the instances that were launched. 
- - """ - image = module.params.get('image') - machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - disks = module.params.get('disks') - state = module.params.get('state') - tags = module.params.get('tags') - zone = module.params.get('zone') - ip_forward = module.params.get('ip_forward') - external_ip = module.params.get('external_ip') - disk_auto_delete = module.params.get('disk_auto_delete') - service_account_permissions = module.params.get('service_account_permissions') - service_account_email = module.params.get('service_account_email') - - if external_ip == "none": - external_ip = None - - new_instances = [] - changed = False - - lc_image = gce.ex_get_image(image) - lc_disks = [] - disk_modes = [] - for i, disk in enumerate(disks or []): - if isinstance(disk, dict): - lc_disks.append(gce.ex_get_volume(disk['name'])) - disk_modes.append(disk['mode']) - else: - lc_disks.append(gce.ex_get_volume(disk)) - # boot disk is implicitly READ_WRITE - disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE') - lc_network = gce.ex_get_network(network) - lc_machine_type = gce.ex_get_size(machine_type) - lc_zone = gce.ex_get_zone(zone) - - # Try to convert the user's metadata value into the format expected - # by GCE. First try to ensure user has proper quoting of a - # dictionary-like syntax using 'literal_eval', then convert the python - # dict into a python list of 'key' / 'value' dicts. Should end up - # with: - # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] - if metadata: - if isinstance(metadata, dict): - md = metadata - else: - try: - md = literal_eval(str(metadata)) - if not isinstance(md, dict): - raise ValueError('metadata must be a dict') - except ValueError as e: - module.fail_json(msg='bad metadata: %s' % str(e)) - except SyntaxError as e: - module.fail_json(msg='bad metadata syntax') - - if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15': - items = [] - for k, v in md.items(): - items.append({"key": k, "value": v}) - metadata = {'items': items} - else: - metadata = md - - ex_sa_perms = [] - bad_perms = [] - if service_account_permissions: - for perm in service_account_permissions: - if perm not in gce.SA_SCOPES_MAP.keys(): - bad_perms.append(perm) - if len(bad_perms) > 0: - module.fail_json(msg='bad permissions: %s' % str(bad_perms)) - if service_account_email: - ex_sa_perms.append({'email': service_account_email}) - else: - ex_sa_perms.append({'email': "default"}) - ex_sa_perms[0]['scopes'] = service_account_permissions - - # These variables all have default values but check just in case - if not lc_image or not lc_network or not lc_machine_type or not lc_zone: - module.fail_json(msg='Missing required create instance variable', - changed=False) - - for name in instance_names: - pd = None - if lc_disks: - pd = lc_disks[0] - elif persistent_boot_disk: - try: - pd = gce.create_volume(None, "%s" % name, image=lc_image) - except ResourceExistsError: - pd = gce.ex_get_volume("%s" % name, lc_zone) - inst = None - try: - inst = gce.create_node( - name, lc_machine_type, lc_image, location=lc_zone, - ex_network=network, ex_tags=tags, ex_metadata=metadata, - ex_boot_disk=pd, ex_can_ip_forward=ip_forward, - external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, - ex_service_accounts=ex_sa_perms - ) - changed = True - except ResourceExistsError: - inst = gce.ex_get_node(name, lc_zone) - except 
GoogleBaseError as e: - module.fail_json(msg='Unexpected error attempting to create ' + - 'instance %s, error: %s' % (name, e.value)) - - for i, lc_disk in enumerate(lc_disks): - # Check whether the disk is already attached - if (len(inst.extra['disks']) > i): - attached_disk = inst.extra['disks'][i] - if attached_disk['source'] != lc_disk.extra['selfLink']: - module.fail_json( - msg=("Disk at index %d does not match: requested=%s found=%s" % ( - i, lc_disk.extra['selfLink'], attached_disk['source']))) - elif attached_disk['mode'] != disk_modes[i]: - module.fail_json( - msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % ( - i, disk_modes[i], attached_disk['mode']))) - else: - continue - gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i]) - # Work around libcloud bug: attached volumes don't get added - # to the instance metadata. get_instance_info() only cares about - # source and index. - if len(inst.extra['disks']) != i+1: - inst.extra['disks'].append( - {'source': lc_disk.extra['selfLink'], 'index': i}) - - if inst: - new_instances.append(inst) - - instance_names = [] - instance_json_data = [] - for inst in new_instances: - d = get_instance_info(inst) - instance_names.append(d['name']) - instance_json_data.append(d) - - return (changed, instance_json_data, instance_names) - - -def terminate_instances(module, gce, instance_names, zone_name): - """Terminates a list of instances. - - module: Ansible module object - gce: authenticated GCE connection object - instance_names: a list of instance names to terminate - zone_name: the zone where the instances reside prior to termination - - Returns a dictionary of instance names that were terminated. - - """ - changed = False - terminated_instance_names = [] - for name in instance_names: - inst = None - try: - inst = gce.ex_get_node(name, zone_name) - except ResourceNotFoundError: - pass - except Exception as e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - if inst: - gce.destroy_node(inst) - terminated_instance_names.append(inst.name) - changed = True - - return (changed, terminated_instance_names) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - image=dict(default='debian-7'), - instance_names=dict(), - machine_type=dict(default='n1-standard-1'), - metadata=dict(), - name=dict(), - network=dict(default='default'), - persistent_boot_disk=dict(type='bool', default=False), - disks=dict(type='list'), - state=dict(choices=['active', 'present', 'absent', 'deleted'], - default='present'), - tags=dict(type='list'), - zone=dict(default='us-central1-a'), - service_account_email=dict(), - service_account_permissions=dict(type='list'), - pem_file=dict(), - project_id=dict(), - ip_forward=dict(type='bool', default=False), - external_ip=dict(choices=['ephemeral', 'none'], - default='ephemeral'), - disk_auto_delete=dict(type='bool', default=True), - ) - ) - - if not HAS_PYTHON26: - module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+") - if not HAS_LIBCLOUD: - module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module') - - gce = gce_connect(module) - - image = module.params.get('image') - instance_names = module.params.get('instance_names') - machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - name = module.params.get('name') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - state = module.params.get('state') - tags = module.params.get('tags') - zone = 
module.params.get('zone') - ip_forward = module.params.get('ip_forward') - changed = False - - inames = [] - if isinstance(instance_names, list): - inames = instance_names - elif isinstance(instance_names, str): - inames = instance_names.split(',') - if name: - inames.append(name) - if not inames: - module.fail_json(msg='Must specify a "name" or "instance_names"', - changed=False) - if not zone: - module.fail_json(msg='Must specify a "zone"', changed=False) - - json_output = {'zone': zone} - if state in ['absent', 'deleted']: - json_output['state'] = 'absent' - (changed, terminated_instance_names) = terminate_instances( - module, gce, inames, zone) - - # based on what user specified, return the same variable, although - # value could be different if an instance could not be destroyed - if instance_names: - json_output['instance_names'] = terminated_instance_names - elif name: - json_output['name'] = name - - elif state in ['active', 'present']: - json_output['state'] = 'present' - (changed, instance_data, instance_name_list) = create_instances( - module, gce, inames) - json_output['instance_data'] = instance_data - if instance_names: - json_output['instance_names'] = instance_name_list - elif name: - json_output['name'] = name - - json_output['changed'] = changed - module.exit_json(**json_output) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * -if __name__ == '__main__': - main() diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index 34dcd2496..34ab09533 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -16,18 +16,8 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" + oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}" + oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}" with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}" - -- name: List Hosts - hosts: oo_list_hosts - -- name: List Hosts - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - debug: msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index 7c8189224..b7604580c 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -9,6 +9,7 @@ project_id: "{{ lookup('env', 'gce_project_id') }}" zone: "{{ lookup('env', 'zone') }}" network: "{{ lookup('env', 'network') }}" + subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}" # unsupported in 1.9.+ #service_account_permissions: "datastore,logging-write" tags: diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index 86d5d0aad..579cd7ac6 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -16,18 +16,8 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" + oo_public_ipv4: "" + oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}" with_items: "{{ groups[scratch_group] | 
default([]) | difference(['localhost']) }}" - -- name: List Hosts - hosts: oo_list_hosts - -- name: List Hosts - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - debug: msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 458cf5ac7..20ce47c07 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -45,7 +45,7 @@ parameters: node_port_incoming: type: string label: Source of node port connections - description: Authorized sources targetting node ports + description: Authorized sources targeting node ports default: 0.0.0.0/0 num_etcd: @@ -88,11 +88,6 @@ parameters: label: Infra image description: Name of the image for the infra node servers - dns_image: - type: string - label: DNS image - description: Name of the image for the DNS server - etcd_flavor: type: string label: Etcd flavor @@ -113,11 +108,6 @@ parameters: label: Infra flavor description: Flavor of the infra node servers - dns_flavor: - type: string - label: DNS flavor - description: Flavor of the DNS server - outputs: etcd_names: @@ -168,26 +158,6 @@ outputs: description: Floating IPs of the nodes value: { get_attr: [ infra_nodes, floating_ip ] } - dns_name: - description: Name of the DNS - value: - get_attr: - - dns - - name - - dns_floating_ip: - description: Floating IP of the DNS - value: - get_attr: - - dns - - addresses - - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - - 1 - - addr - resources: net: @@ -213,22 +183,7 @@ resources: template: subnet_24_prefix.0/24 params: subnet_24_prefix: { get_param: subnet_24_prefix } - allocation_pools: - - start: - str_replace: - template: subnet_24_prefix.3 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - end: - str_replace: - template: subnet_24_prefix.254 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - dns_nameservers: - - str_replace: - template: subnet_24_prefix.2 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } + dns_nameservers: { get_param: dns_nameservers } router: type: OS::Neutron::Router @@ -428,44 +383,6 @@ resources: port_range_min: 443 port_range_max: 443 - dns-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-dns-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id cluster DNS - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_mode: remote_group_id - remote_group_id: { get_resource: etcd-secgrp } - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_mode: remote_group_id - remote_group_id: { get_resource: node-secgrp } - etcd: type: OS::Heat::ResourceGroup properties: @@ -599,79 +516,3 @@ resources: cluster_id: { get_param: cluster_id } depends_on: - interface - - dns: - type: 
OS::Nova::Server - properties: - name: - str_replace: - template: cluster_id-dns - params: - cluster_id: { get_param: cluster_id } - key_name: { get_resource: keypair } - image: { get_param: dns_image } - flavor: { get_param: dns_flavor } - networks: - - port: { get_resource: dns-port } - user_data: { get_resource: dns-config } - user_data_format: RAW - - dns-port: - type: OS::Neutron::Port - properties: - network: { get_resource: net } - fixed_ips: - - subnet: { get_resource: subnet } - ip_address: - str_replace: - template: subnet_24_prefix.2 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - security_groups: - - { get_resource: dns-secgrp } - - dns-floating-ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: external_net } - port_id: { get_resource: dns-port } - - dns-config: - type: OS::Heat::MultipartMime - properties: - parts: - - config: - str_replace: - template: | - #cloud-config - disable_root: true - - system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - - write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty - - path: /etc/sysconfig/network-scripts/ifcfg-eth0 - content: | - DEVICE="eth0" - BOOTPROTO="dhcp" - DNS1="$dns1" - DNS2="$dns2" - PEERDNS="no" - ONBOOT="yes" - runcmd: - - [ "/usr/bin/systemctl", "restart", "network" ] - params: - $dns1: - get_param: - - dns_nameservers - - 0 - $dns2: - get_param: - - dns_nameservers - - 1 diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml index f83f2c984..435139849 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml @@ -107,7 +107,7 @@ resources: flavor: { get_param: flavor } networks: - port: { get_resource: port } - user_data: { get_file: user-data } + user_data: { get_resource: config } user_data_format: RAW metadata: environment: { get_param: cluster_env } @@ -128,3 +128,25 @@ resources: properties: floating_network: { get_param: floating_network } port_id: { get_resource: port } + + config: + type: OS::Heat::CloudConfig + properties: + cloud_config: + disable_root: true + + hostname: { get_param: name } + + system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + + write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + # content: Defaults:openshift !requiretty + # Encoded in base64 to be sure that we do not forget the trailing newline or + # sudo will not be able to parse that file + encoding: b64 + content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg== diff --git a/playbooks/openstack/openshift-cluster/files/user-data b/playbooks/openstack/openshift-cluster/files/user-data deleted file mode 100644 index eb65f7cec..000000000 --- a/playbooks/openstack/openshift-cluster/files/user-data +++ /dev/null @@ -1,13 +0,0 @@ -#cloud-config -disable_root: true - -system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - -write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml index de68f5207..6c6f671be 100644 --- a/playbooks/openstack/openshift-cluster/list.yml +++ b/playbooks/openstack/openshift-cluster/list.yml @@ -17,18 +17,8 @@ 
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" + oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}" + oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}" with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - -- name: List Hosts - hosts: oo_list_hosts - -- name: List Hosts - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - debug: msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..e55ef5f0b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +ansible>=2.1 +pyOpenSSL diff --git a/roles/docker/README.md b/roles/docker/README.md index 6b5ee4421..1f0d94da0 100644 --- a/roles/docker/README.md +++ b/roles/docker/README.md @@ -17,7 +17,7 @@ docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redh Dependencies ------------ -None +Depends on the os_firewall role. Example Playbook ---------------- diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml index 6e2c98601..3d362158d 100644 --- a/roles/docker/meta/main.yml +++ b/roles/docker/meta/main.yml @@ -9,4 +9,6 @@ galaxy_info: - name: EL versions: - 7 -dependencies: [] +dependencies: + - role: os_firewall + os_firewall_use_firewalld: False diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 7147aa2d4..9b7ef0830 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,7 +1,4 @@ --- -- stat: path=/etc/sysconfig/docker-storage - register: docker_storage_check - - name: Get current installed Docker version command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" when: not openshift.common.is_atomic | bool @@ -46,15 +43,16 @@ action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present" when: not openshift.common.is_atomic | bool -- name: Start the Docker service - service: - name: docker - enabled: yes - state: started - register: start_result +- name: Ensure docker.service.d directory exists + file: + path: "{{ docker_systemd_dir }}" + state: directory -- set_fact: - docker_service_status_changed: start_result | changed +# Extend the default Docker service unit file +- name: Configure Docker service unit file + template: + dest: "{{ docker_systemd_dir }}/custom.conf" + src: custom.conf.j2 - include: udev_workaround.yml when: docker_udev_workaround | default(False) | bool @@ -113,4 +111,15 @@ notify: - restart docker +- name: Start the Docker service + systemd: + name: docker + enabled: yes + state: started + daemon_reload: yes + register: start_result + +- set_fact: + docker_service_status_changed: start_result | changed + - meta: flush_handlers diff --git a/roles/docker/templates/custom.conf.j2 b/roles/docker/templates/custom.conf.j2 new file mode 100644 index 000000000..53ed56abc --- /dev/null +++ b/roles/docker/templates/custom.conf.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +[Unit] +Requires=iptables.service +After=iptables.service diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml index f81f99e2b..5237ed8f2 100644 --- a/roles/docker/vars/main.yml +++ b/roles/docker/vars/main.yml @@ -1,2 +1,3 @@ --- udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d 
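# docker_systemd_dir is the systemd drop-in directory that the "Configure
# Docker service unit file" task above renders custom.conf.j2 into, so the
# stock docker.service unit is extended rather than replaced.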
+docker_systemd_dir: /etc/systemd/system/docker.service.d diff --git a/roles/etcd/etcdctl.sh b/roles/etcd/etcdctl.sh new file mode 100644 index 000000000..0e324a8a9 --- /dev/null +++ b/roles/etcd/etcdctl.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# Sets up handy aliases for etcd; we need both etcdctl2 and etcdctl3 because +# their command flags differ between the two. Should work on standalone +# etcd hosts and on master + etcd hosts too because we use the peer keys. +etcdctl2() { + /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@} +} + +etcdctl3() { + ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@} +} diff --git a/roles/etcd/files/etcdctl.sh b/roles/etcd/files/etcdctl.sh new file mode 100644 index 000000000..0e324a8a9 --- /dev/null +++ b/roles/etcd/files/etcdctl.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# Sets up handy aliases for etcd; we need both etcdctl2 and etcdctl3 because +# their command flags differ between the two. Should work on standalone +# etcd hosts and on master + etcd hosts too because we use the peer keys. +etcdctl2() { + /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@} +} + +etcdctl3() { + ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@} +} diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd/tasks/etcdctl.yml new file mode 100644 index 000000000..32c176449 --- /dev/null +++ b/roles/etcd/tasks/etcdctl.yml @@ -0,0 +1,11 @@ +- name: Install etcd for etcdctl + action: "{{ ansible_pkg_mgr }} name=etcd state=present" + when: not openshift.common.is_atomic | bool + +- name: Configure etcd profile.d aliases + copy: + src: etcdctl.sh + dest: /etc/profile.d/etcdctl.sh + mode: 0755 + owner: root + group: root diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 2bc6a8678..790eb3c5a 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -74,5 +74,8 @@ enabled: yes register: start_result +- include: etcdctl.yml + when: openshift_etcd_etcdctl_profile | default(true) | bool + - set_fact: etcd_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json index 00b363c42..a65d35c2e 100644 --- a/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json +++ b/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json @@ -12,14 +12,18 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "dotnet" + "name": "dotnet", + "annotations": { + "openshift.io/display-name": ".NET Core" + } }, "spec": { "tags": [ { "name": "latest", "annotations": { - "description": "Build and run .NET Core 1.0 applications", + "openshift.io/display-name": ".NET Core (Latest)", + "description": "Build and run .NET Core applications on RHEL 7.
For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.0/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.", "iconClass": "icon-dotnet", "tags": "builder,.net,dotnet,dotnetcore", "supports":"dotnet", @@ -34,7 +38,8 @@ { "name": "1.0", "annotations": { - "description": "Build and run .NET Core 1.0 applications", + "openshift.io/display-name": ".NET Core 1.0", + "description": "Build and run .NET Core 1.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.0/README.md.", "iconClass": "icon-dotnet", "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10", "supports":"dotnet:1.0,dotnet", diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json index ef77bfcab..a645de7e2 100644 --- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json @@ -95,7 +95,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Node.js (Latest)", - "description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", "supports":"nodejs", @@ -156,7 +156,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Perl (Latest)", - "description": "Build and run Perl applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Build and run Perl applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.", "iconClass": "icon-perl", "tags": "builder,perl", "supports":"perl", @@ -218,7 +218,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "PHP (Latest)", - "description": "Build and run PHP applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", "iconClass": "icon-php", "tags": "builder,php", "supports":"php", @@ -279,7 +279,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Python (Latest)", - "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python", @@ -372,7 +372,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "WildFly (Latest)", - "description": "Build and run WildFly applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Build and run WildFly applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of WildFly available on OpenShift, including major versions updates.", "iconClass": "icon-wildfly", "tags": "builder,wildfly,java", "supports":"jee,java", @@ -465,7 +465,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MySQL (Latest)", - "description": "Provides a MySQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a MySQL database on CentOS 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.", "iconClass": "icon-mysql-database", "tags": "mysql" }, @@ -520,7 +520,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MariaDB (Latest)", - "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", "iconClass": "icon-mariadb", "tags": "mariadb" }, @@ -561,7 +561,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "PostgreSQL (Latest)", - "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", "iconClass": "icon-postgresql", "tags": "postgresql" }, @@ -630,7 +630,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MongoDB (Latest)", - "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", "iconClass": "icon-mongodb", "tags": "mongodb" }, @@ -699,7 +699,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Jenkins (Latest)", - "description": "Provides a Jenkins server on CentOS 7. 
For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a Jenkins server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.", "iconClass": "icon-jenkins", "tags": "jenkins" }, diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json index 161227d2f..9b9cd236f 100644 --- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json @@ -95,7 +95,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Node.js (Latest)", - "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.", "iconClass": "icon-nodejs", "tags": "builder,nodejs", "supports":"nodejs", @@ -156,7 +156,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Perl (Latest)", - "description": "Build and run Perl applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Build and run Perl applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.", "iconClass": "icon-perl", "tags": "builder,perl", "supports":"perl", @@ -218,7 +218,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "PHP (Latest)", - "description": "Build and run PHP applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", "iconClass": "icon-php", "tags": "builder,php", "supports":"php", @@ -279,7 +279,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Python (Latest)", - "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python", @@ -372,7 +372,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MySQL (Latest)", - "description": "Provides a MySQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a MySQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.", "iconClass": "icon-mysql-database", "tags": "mysql" }, @@ -427,7 +427,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MariaDB (Latest)", - "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a MariaDB database on RHEL 7. 
For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", "iconClass": "icon-mariadb", "tags": "mariadb" }, @@ -468,7 +468,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "PostgreSQL (Latest)", - "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", "iconClass": "icon-postgresql", "tags": "postgresql" }, @@ -537,7 +537,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "MongoDB (Latest)", - "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", "iconClass": "icon-mongodb", "tags": "mongodb" }, @@ -606,7 +606,7 @@ "name": "latest", "annotations": { "openshift.io/display-name": "Jenkins (Latest)", - "description": "Provides a Jenkins server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.", + "description": "Provides a Jenkins server on RHEL 7. 
For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.", "iconClass": "icon-jenkins", "tags": "jenkins" }, diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 537b6c60f..d797eb4d3 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -7,7 +7,13 @@ """Ansible module for retrieving and setting openshift related facts""" -import ConfigParser +try: + # python2 + import ConfigParser +except ImportError: + # python3 + import configparser as ConfigParser + import copy import io import os @@ -148,6 +154,7 @@ def hostname_valid(hostname): if (not hostname or hostname.startswith('localhost') or hostname.endswith('localdomain') or + hostname.endswith('novalocal') or len(hostname.split('.')) < 2): return False @@ -201,9 +208,9 @@ def query_metadata(metadata_url, headers=None, expect_json=False): if info['status'] != 200: raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable") if expect_json: - return module.from_json(result.read()) + return module.from_json(to_native(result.read())) else: - return [line.strip() for line in result.readlines()] + return [to_native(line.strip()) for line in result.readlines()] def walk_metadata(metadata_url, headers=None, expect_json=False): @@ -311,7 +318,7 @@ def normalize_aws_facts(metadata, facts): ): int_info = dict() var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'} - for ips_var, int_var in var_map.iteritems(): + for ips_var, int_var in iteritems(var_map): ips = interface.get(int_var) if isinstance(ips, basestring): int_info[ips_var] = [ips] @@ -362,12 +369,15 @@ def normalize_openstack_facts(metadata, facts): facts['network']['ip'] = local_ipv4 facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4'] - # TODO: verify local hostname makes sense and is resolvable - facts['network']['hostname'] = metadata['hostname'] - - # TODO: verify that public hostname makes sense and is resolvable - pub_h = metadata['ec2_compat']['public-hostname'] - facts['network']['public_hostname'] = pub_h + for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'), + ('public_hostname', 'public-hostname', 'public-ipv4')]: + try: + if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]: + facts['network'][f_var] = metadata['ec2_compat'][h_var] + else: + facts['network'][f_var] = metadata['ec2_compat'][ip_var] + except socket.gaierror: + facts['network'][f_var] = metadata['ec2_compat'][ip_var] return facts @@ -828,23 +838,29 @@ def set_version_facts_if_unset(facts): version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1') version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0') version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0') + version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('1.4.0') else: version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905') version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1') version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901') version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0') + version_gte_3_4_or_1_4 = LooseVersion(version) >= 
LooseVersion('3.4.0') else: version_gte_3_1_or_1_1 = True version_gte_3_1_1_or_1_1_1 = True version_gte_3_2_or_1_2 = True - version_gte_3_3_or_1_3 = False + version_gte_3_3_or_1_3 = True + version_gte_3_4_or_1_4 = False facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1 facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1 facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2 facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3 + facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4 - if version_gte_3_3_or_1_3: + if version_gte_3_4_or_1_4: + examples_content_version = 'v1.4' + elif version_gte_3_3_or_1_3: examples_content_version = 'v1.3' elif version_gte_3_2_or_1_2: examples_content_version = 'v1.2' @@ -927,7 +943,7 @@ def set_sdn_facts_if_unset(facts, system_facts): # default MTU if interface MTU cannot be detected facts['node']['sdn_mtu'] = '1450' - for val in system_facts.itervalues(): + for val in itervalues(system_facts): if isinstance(val, dict) and 'mtu' in val: mtu = val['mtu'] @@ -936,6 +952,14 @@ def set_sdn_facts_if_unset(facts, system_facts): return facts +def set_nodename(facts): + if 'node' in facts and 'common' in facts: + if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack': + facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '') + else: + facts['node']['nodename'] = facts['common']['hostname'].lower() + return facts + def migrate_oauth_template_facts(facts): """ Migrate an old oauth template fact to a newer format if it's present. @@ -1110,7 +1134,7 @@ def build_controller_args(facts): controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if facts['cloudprovider']['kind'] == 'gce': controller_args['cloud-provider'] = ['gce'] - kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] + controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] if controller_args != {}: facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], []) return facts @@ -1131,7 +1155,7 @@ def build_api_server_args(facts): api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if facts['cloudprovider']['kind'] == 'gce': api_server_args['cloud-provider'] = ['gce'] - kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] + api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] if api_server_args != {}: facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], []) return facts @@ -1312,7 +1336,7 @@ def apply_provider_facts(facts, provider_facts): facts['common'][h_var] = choose_hostname( [provider_facts['network'].get(h_var)], - facts['common'][ip_var] + facts['common'][h_var] ) facts['provider'] = provider_facts @@ -1346,7 +1370,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw 'image_policy_config'] facts = dict() - for key, value in orig.iteritems(): + for key, value in iteritems(orig): # Key exists in both old and new facts. 
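# iteritems() is the compatibility helper from ansible.module_utils.six
# (imported near the bottom of this module), so this loop behaves the same
# on Python 2 and Python 3; dict.iteritems() no longer exists in Python 3.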
if key in new: if key in inventory_json_facts: @@ -1571,7 +1595,7 @@ def set_container_facts_if_unset(facts): cli_image = master_image node_image = 'openshift3/node' ovs_image = 'openshift3/openvswitch' - etcd_image = 'registry.access.redhat.com/rhel7/etcd' + etcd_image = 'registry.access.redhat.com/rhel7/etcd3' pod_image = 'openshift3/ose-pod' router_image = 'openshift3/ose-haproxy-router' registry_image = 'openshift3/ose-docker-registry' @@ -1581,7 +1605,7 @@ def set_container_facts_if_unset(facts): cli_image = master_image node_image = 'aep3_beta/node' ovs_image = 'aep3_beta/openvswitch' - etcd_image = 'registry.access.redhat.com/rhel7/etcd' + etcd_image = 'registry.access.redhat.com/rhel7/etcd3' pod_image = 'aep3_beta/aep-pod' router_image = 'aep3_beta/aep-haproxy-router' registry_image = 'aep3_beta/aep-docker-registry' @@ -1591,7 +1615,7 @@ def set_container_facts_if_unset(facts): cli_image = master_image node_image = 'openshift/node' ovs_image = 'openshift/openvswitch' - etcd_image = 'registry.access.redhat.com/rhel7/etcd' + etcd_image = 'registry.access.redhat.com/rhel7/etcd3' pod_image = 'openshift/origin-pod' router_image = 'openshift/origin-haproxy-router' registry_image = 'openshift/origin-docker-registry' @@ -1795,6 +1819,7 @@ class OpenShiftFacts(object): facts = set_proxy_facts(facts) if not safe_get_bool(facts['common']['is_containerized']): facts = set_installed_variant_rpm_facts(facts) + facts = set_nodename(facts) return dict(openshift=facts) def get_defaults(self, roles, deployment_type, deployment_subtype): @@ -2099,7 +2124,7 @@ class OpenShiftFacts(object): facts_to_set[self.role] = facts if openshift_env != {} and openshift_env != None: - for fact, value in openshift_env.iteritems(): + for fact, value in iteritems(openshift_env): oo_env_facts = dict() current_level = oo_env_facts keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:] @@ -2157,7 +2182,7 @@ class OpenShiftFacts(object): facts (dict): facts to clean """ facts_to_remove = [] - for fact, value in facts.iteritems(): + for fact, value in iteritems(facts): if isinstance(facts[fact], dict): facts[fact] = self.remove_empty_facts(facts[fact]) else: @@ -2288,6 +2313,9 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.facts import * from ansible.module_utils.urls import * +from ansible.module_utils.six import iteritems, itervalues +from ansible.module_utils._text import to_native +from ansible.module_utils.six import b if __name__ == '__main__': main() diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml index 0162d1fb0..625af9acd 100644 --- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml +++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml @@ -40,7 +40,7 @@ - name: "Create templates for logging accounts and the deployer" command: > - {{ openshift.common.client_binary }} create + {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig -f {{ hosted_base }}/logging-deployer.yaml --config={{ mktemp.stdout }}/admin.kubeconfig -n logging @@ -49,28 +49,41 @@ changed_when: "'created' in logging_import_template.stdout" - name: "Process the logging accounts template" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -" + shell: > + {{ 
openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f - register: process_deployer_accounts failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr - name: "Set permissions for logging-deployer service account" command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig + policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer register: permiss_output failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr" - name: "Set permissions for fluentd" command: > - {{ openshift.common.client_binary }} adm policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig + policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd_output failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" - name: "Set additional permissions for fluentd" command: > - {{ openshift.common.client_binary }} adm policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd + {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig + add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd2_output failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" + - name: "Add rolebinding-reader to aggregated-logging-elasticsearch" + command: > + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig + policy add-cluster-role-to-user rolebinding-reader + system:serviceaccount:logging:aggregated-logging-elasticsearch + register: rolebinding_reader_output + failed_when: "rolebinding_reader_output.rc == 1 and 'exists' not in rolebinding_reader_output.stderr" + - name: "Create ConfigMap for deployer parameters" command: > {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }} diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml index 9cff9daca..ddfda1272 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml @@ -66,6 +66,15 @@ items: - watch - delete - update + - apiVersion: v1 + kind: ClusterRole + metadata: + name: rolebinding-reader + rules: + - resources: + - clusterrolebindings + verbs: + - get - apiVersion: v1 kind: RoleBinding @@ -88,6 +97,17 @@ items: subjects: - kind: ServiceAccount name: logging-deployer + - + apiVersion: v1 + kind: RoleBinding + metadata: + name: logging-elasticsearch-view-role + roleRef: + kind: ClusterRole + name: view + subjects: + - kind: ServiceAccount + name: aggregated-logging-elasticsearch - apiVersion: "v1" kind: "Template" diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml index 97a86c11d..66051755c 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml @@ -86,6 +86,8 @@ objects: value: ${HEAPSTER_NODE_ID} - name: METRIC_RESOLUTION value: ${METRIC_RESOLUTION} + - name: STARTUP_TIMEOUT + value: ${STARTUP_TIMEOUT} dnsPolicy: ClusterFirst restartPolicy: Never serviceAccount: metrics-deployer @@ -160,3 +162,7 @@ parameters: description: "How often metrics should be gathered. Defaults value of '15s' for 15 seconds" name: METRIC_RESOLUTION value: "15s" +- + description: "How long in seconds we should wait until Hawkular Metrics and Heapster start up before attempting a restart" + name: STARTUP_TIMEOUT + value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/metrics-deployer.yaml index 5f2290419..54691572a 100644 --- a/roles/openshift_hosted_templates/files/v1.4/origin/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/metrics-deployer.yaml @@ -86,6 +86,8 @@ objects: value: ${HEAPSTER_NODE_ID} - name: METRIC_RESOLUTION value: ${METRIC_RESOLUTION} + - name: STARTUP_TIMEOUT + value: ${STARTUP_TIMEOUT} dnsPolicy: ClusterFirst restartPolicy: Never serviceAccount: metrics-deployer @@ -160,3 +162,7 @@ parameters: description: "How often metrics should be gathered. Defaults value of '15s' for 15 seconds" name: METRIC_RESOLUTION value: "15s" +- + description: "How long in seconds we should wait until Hawkular Metrics and Heapster start up before attempting a restart" + name: STARTUP_TIMEOUT + value: "500" diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index a1f42f8c4..28e4e46e9 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -14,7 +14,7 @@ - name: Wait for Node Registration command: > - {{ openshift.common.client_binary }} get node {{ hostvars[item].openshift.common.hostname }} + {{ openshift.common.client_binary }} get node {{ hostvars[item].openshift.node.nodename }} --config={{ openshift_manage_node_kubeconfig }} -n default register: omd_get_node @@ -26,19 +26,19 @@ - name: Set node schedulability command: > - {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.common.hostname | lower }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }} + {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }} --config={{ openshift_manage_node_kubeconfig }} -n default with_items: "{{ openshift_nodes }}" - when: hostvars[item].openshift.common.hostname is defined + when: hostvars[item].openshift.node.nodename is defined - name: Label nodes command: > - {{ openshift.common.client_binary }} label --overwrite node {{ hostvars[item].openshift.common.hostname | lower }} {{ hostvars[item].openshift.node.labels | oo_combine_dict }} + {{ openshift.common.client_binary }} label --overwrite node {{ hostvars[item].openshift.node.nodename }} {{ hostvars[item].openshift.node.labels | oo_combine_dict }} --config={{ openshift_manage_node_kubeconfig }} -n default with_items: "{{ openshift_nodes }}" - when: hostvars[item].openshift.common.hostname is defined and 'labels' in
hostvars[item].openshift.node and hostvars[item].openshift.node.labels != {} + when: hostvars[item].openshift.node.nodename is defined and 'labels' in hostvars[item].openshift.node and hostvars[item].openshift.node.labels != {} - name: Delete temp directory file: diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index dd3bca3eb..55ae4bf54 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -33,7 +33,7 @@ networkConfig: {% if openshift.node.set_node_ip | bool %} nodeIP: {{ openshift.common.ip }} {% endif %} -nodeName: {{ openshift.common.hostname | lower }} +nodeName: {{ openshift.node.nodename }} podManifestConfig: servingInfo: bindAddress: 0.0.0.0:10250 diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml index 8e2702391..718537287 100644 --- a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/set_version_containerized.yml @@ -37,3 +37,8 @@ openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" when: openshift_version is defined and openshift_version.split('.') | length == 2 +# We finally have the specific version. Now we clean up any strange +# dangly +c0mm1t-offset tags in the version. See also, +# openshift_facts.py +- set_fact: + openshift_version: "{{ openshift_version | oo_chomp_commit_offset }}" diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md index 187d74b06..c6c70b81d 100644 --- a/roles/os_firewall/README.md +++ b/roles/os_firewall/README.md @@ -14,7 +14,7 @@ Role Variables | Name | Default | | |---------------------------|---------|----------------------------------------| -| os_firewall_use_firewalld | True | If false, use iptables | +| os_firewall_use_firewalld | False | If false, use iptables | | os_firewall_allow | [] | List of service,port mappings to allow | | os_firewall_deny | [] | List of service, port mappings to deny | diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml index c93335b7b..6df7c9f2b 100644 --- a/roles/os_firewall/meta/main.yml +++ b/roles/os_firewall/meta/main.yml @@ -11,5 +11,6 @@ galaxy_info: - 7 categories: - system +allow_duplicates: yes dependencies: - { role: openshift_facts } diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 8470fb527..7e5ad4144 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -318,6 +318,7 @@ hostname. def set_cluster_hostname(oo_cfg): + first_master = next((host for host in oo_cfg.deployment.hosts if host.is_master()), None) message = """ You have chosen to install a single master cluster (non-HA). 
@@ -329,8 +330,9 @@ If you want to override the cluster host name now to something other than the de """ click.echo(message) cluster_hostname = click.prompt('Enter hostname or IP address', - default='') + default=str(first_master)) oo_cfg.deployment.variables['openshift_master_cluster_hostname'] = cluster_hostname + oo_cfg.deployment.variables['openshift_master_cluster_public_hostname'] = cluster_hostname def collect_storage_host(hosts): diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 764cc1e56..f542fb376 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -314,6 +314,10 @@ def run_uninstall_playbook(hosts, verbose=False): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + # override the ansible config for our main playbook run + if 'ansible_quiet_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + return run_ansible(playbook, inventory_file, facts_env, verbose) @@ -328,4 +332,8 @@ def run_upgrade_playbook(hosts, playbook, verbose=False): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + # override the ansible config for our main playbook run + if 'ansible_quiet_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + return run_ansible(playbook, inventory_file, facts_env, verbose)
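Both hunks above rely on assignment order for precedence: because the 'ansible_quiet_config' check runs after the 'ansible_config' check, the quiet config wins whenever both settings are present. A minimal Python sketch of that precedence in isolation — build_facts_env is a hypothetical helper, not part of this patch:

    def build_facts_env(settings):
        # Mirrors the pattern above: later assignments to the same key win.
        env = {}
        if 'ansible_log_path' in settings:
            env['ANSIBLE_LOG_PATH'] = settings['ansible_log_path']
        if 'ansible_config' in settings:
            env['ANSIBLE_CONFIG'] = settings['ansible_config']
        # the quiet config, when present, overrides the regular one
        if 'ansible_quiet_config' in settings:
            env['ANSIBLE_CONFIG'] = settings['ansible_quiet_config']
        return env

    # build_facts_env({'ansible_config': 'ansible.cfg',
    #                  'ansible_quiet_config': 'quiet.cfg'})
    # => {'ANSIBLE_CONFIG': 'quiet.cfg'}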