133 files changed, 1953 insertions, 715 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index d231e0c7f..fdcdcb9dd 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.4.8-1 ./ +3.4.12-1 ./ diff --git a/README_vagrant.md b/README_vagrant.md index bda474f14..cb62e31d8 100644 --- a/README_vagrant.md +++ b/README_vagrant.md @@ -1,52 +1 @@ -:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs. - -Requirements ------------- -- ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient). -- vagrant (tested against version 1.7.2) -- vagrant-hostmanager plugin (tested against version 1.5.0) -- vagrant-libvirt (tested against version 0.0.26) - - Only required if using libvirt instead of virtualbox - -For ``enterprise`` deployment types the base RHEL box has to be added to Vagrant: - -1. Download the RHEL7 vagrant image (libvirt or virtualbox) available from the [Red Hat Container Development Kit downloads in the customer portal](https://access.redhat.com/downloads/content/293/ver=1/rhel---7/1.0.1/x86_64/product-downloads) - -2. Install it into vagrant - - ``$ vagrant box add --name rhel-7 /path/to/rhel-server-libvirt-7.1-3.x86_64.box`` - -3. (optional, recommended) Increase the disk size of the image to 20GB - This is a two step process. (these instructions are specific to libvirt) - - Resize the actual qcow2 image: - - ``$ qemu-img resize ~/.vagrant.d/boxes/rhel-7/0/libvirt/box.img 20GB`` - - Edit `~/.vagrant.d/boxes/rhel-7/0/libvirt/metadata.json` to reflect the new size. 
A corrected metadata.json looks like this: - - ``{"provider": "libvirt", "format": "qcow2", "virtual_size": 20}`` - -Usage ------ -``` -vagrant up --no-provision -vagrant provision -``` - -Using libvirt: -``` -vagrant up --provider=libvirt --no-provision -vagrant provision -``` - -Environment Variables ---------------------- -The following environment variables can be overriden: -- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, openshift-enterprise) -- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2) - -Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role: - -- ``rhel_subscription_user``: rhsm user -- ``rhel_subscription_pass``: rhsm password -- (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects +The Vagrant-based installation has been moved to: https://github.com/openshift/openshift-ansible-contrib/tree/master/vagrant diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index a38378289..000000000 --- a/Vagrantfile +++ /dev/null @@ -1,71 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -VAGRANTFILE_API_VERSION = "2" - -unless Vagrant.has_plugin?("vagrant-hostmanager") - raise 'vagrant-hostmanager plugin is required' -end - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - - deployment_type = ENV['OPENSHIFT_DEPLOYMENT_TYPE'] || 'origin' - num_nodes = (ENV['OPENSHIFT_NUM_NODES'] || 2).to_i - - config.hostmanager.enabled = true - config.hostmanager.manage_host = true - config.hostmanager.include_offline = true - config.ssh.insert_key = false - - config.vm.provider "virtualbox" do |vbox, override| - override.vm.box = "centos/7" - vbox.memory = 1024 - vbox.cpus = 2 - - # Enable multiple guest CPUs if available - vbox.customize ["modifyvm", :id, "--ioapic", "on"] - end - - config.vm.provider "libvirt" do |libvirt, override| - libvirt.cpus = 2 - libvirt.memory = 1024 - libvirt.driver = 'kvm' - case deployment_type - when "openshift-enterprise" - override.vm.box = "rhel-7" - when "atomic-enterprise" - override.vm.box = "rhel-7" - when "origin" - override.vm.box = "centos/7" - override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05" - override.vm.box_download_checksum_type = "sha256" - end - end - - num_nodes.times do |n| - node_index = n+1 - config.vm.define "node#{node_index}" do |node| - node.vm.hostname = "ose3-node#{node_index}.example.com" - node.vm.network :private_network, ip: "192.168.100.#{200 + n}" - config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service" - end - end - - config.vm.define "master" do |master| - master.vm.hostname = "ose3-master.example.com" - master.vm.network :private_network, ip: "192.168.100.100" - master.vm.network :forwarded_port, guest: 8443, host: 8443 - config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service" - master.vm.provision "ansible" do |ansible| - ansible.limit = 'all' - ansible.sudo = true - ansible.groups = { - "masters" => ["master"], - "nodes" => ["master", "node1", "node2"], - } - ansible.extra_vars = { - deployment_type: deployment_type, - } - ansible.playbook = "playbooks/byo/vagrant.yml" - end - end -end diff --git a/bin/cluster b/bin/cluster index 68d2a7cd4..b9b2ab15f 100755 --- a/bin/cluster +++ b/bin/cluster @@ -314,7 +314,7 @@ This wrapper is 
overriding the following ansible variables: * ANSIBLE_SSH_PIPELINING: If not set in the environment, this wrapper will set it to `True`. - If you experience issue with Ansible ssh pipelining, you can disable it by explicitely set this environment variable to `False`. + If you experience issues with Ansible SSH pipelining, you can disable it by explicitly setting this environment variable to `False`. ''' ) parser.add_argument('-v', '--verbose', action='count', diff --git a/callback_plugins/default.py b/callback_plugins/default.py index bc0b207bb..c64145b5c 100644 --- a/callback_plugins/default.py +++ b/callback_plugins/default.py @@ -45,6 +45,9 @@ class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few- CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'default' + def __init__(self, *args, **kwargs): + BASECLASS.__init__(self, *args, **kwargs) + def _dump_results(self, result): '''Return the text to output for a result.''' result['_ansible_verbose_always'] = True diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc index 267aa850d..cac9645a6 100644 --- a/docs/best_practices_guide.adoc +++ b/docs/best_practices_guide.adoc @@ -76,7 +76,7 @@ def add_person(first_name, last_name, age=None): === PyLint -http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as managable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request. +http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as manageable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request. ''' [[PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file]] diff --git a/docs/style_guide.adoc b/docs/style_guide.adoc index 72eaedcf9..2c2cb8610 100644 --- a/docs/style_guide.adoc +++ b/docs/style_guide.adoc @@ -103,7 +103,7 @@ Ansible role variables are defined as variables contained in (or passed into) a [cols="2v,v"] |=== | <<Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules, Rule>> -| Role variables MUST have a prefix of atleast 3 characters. See below for specific naming rules. +| Role variables MUST have a prefix of at least 3 characters. See below for specific naming rules. |=== ==== Role with 3 (or more) words in the name diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 3541d5471..93fdd5ae4 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -234,7 +234,7 @@ class FilterModule(object): arrange them as a string 'key=value key=value' """ if not isinstance(data, dict): - raise errors.AnsibleFilterError("|failed expects first param is a dict") + raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data)))) return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()]) @@ -286,7 +286,7 @@ class FilterModule(object): } """ if not isinstance(data, dict): - raise errors.AnsibleFilterError("|failed expects first param is a dict") + raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. 
Type: %s" % (str(data), str(type(data)))) if host_type not in ['master', 'node', 'etcd']: raise errors.AnsibleFilterError("|failed expects etcd, master or node" " as the host type") diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py index ee6a62ba5..8d3f31169 100644 --- a/filter_plugins/openshift_master.py +++ b/filter_plugins/openshift_master.py @@ -75,7 +75,7 @@ class IdentityProviderBase(object): valid_mapping_methods = ['add', 'claim', 'generate', 'lookup'] if self.mapping_method not in valid_mapping_methods: - raise errors.AnsibleFilterError("|failed unkown mapping method " + raise errors.AnsibleFilterError("|failed unknown mapping method " "for provider {0}".format(self.__class__.__name__)) self._required = [] self._optional = [] diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini index aa0f9090f..5ee51c84f 100644 --- a/inventory/aws/hosts/ec2.ini +++ b/inventory/aws/hosts/ec2.ini @@ -60,7 +60,7 @@ all_instances = False # By default, only EC2 instances in the 'running' state are returned. Specify # EC2 instance states to return as a comma-separated list. This -# option is overriden when 'all_instances' is True. +# option is overridden when 'all_instances' is True. # instance_states = pending, running, shutting-down, terminated, stopping, stopped # By default, only RDS instances in the 'available' state are returned. Set diff --git a/openshift-ansible.spec b/openshift-ansible.spec index b2e81eaf2..4bc6cef7d 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.4.8 +Version: 3.4.12 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -249,6 +249,54 @@ Atomic OpenShift Utilities includes %changelog +* Mon Oct 24 2016 Troy Dawson <tdawson@redhat.com> 3.4.12-1 +- Move infrastructure templates into openshift_hosted_templates role. + (abutcher@redhat.com) +- Unit tests for the debug_env logger thing (tbielawa@redhat.com) +- a-o-i: Separate install and scaleup workflows (smunilla@redhat.com) +- Reference full vars for registry object storage. (abutcher@redhat.com) + +* Fri Oct 21 2016 Troy Dawson <tdawson@redhat.com> 3.4.11-1 +- trouble creating service signer while running upgrade dockerized + (henning.fjellheim@nb.no) +- Don't freak out if the oc command doesn't exist. (tbielawa@redhat.com) +- Make the json template filter-driven. (tbielawa@redhat.com) +- Add JSON result CLI parsing notes to the README (tbielawa@redhat.com) +- The JSON result saving template now includes a summary of expired/warned + certs for easier parsing. (tbielawa@redhat.com) +- Clean up lint and other little things (polish++) (tbielawa@redhat.com) +- Fix playbooks, update readme, update default vars (tbielawa@redhat.com) +- Refactor into a role (tbielawa@redhat.com) +- Get router/registry certs. Collect common names and subjectAltNames + (tbielawa@redhat.com) +- Support etcd certs now. Fix lint. Generate HTML report. (tbielawa@redhat.com) +- Try to make boiler plate for cert expiry checking (tbielawa@redhat.com) +- Override __init__ in default callback to avoid infinite loop. + (abutcher@redhat.com) +- Drop pacemaker restart logic. (dgoodwin@redhat.com) +- Fix typos (rhcarvalho@gmail.com) +- Switch from "oadm" to "oc adm" and fix bug in binary sync. 
+ (dgoodwin@redhat.com) +- Remove uneeded import of ansible.module_utils.splitter (misc@redhat.com) + +* Wed Oct 19 2016 Troy Dawson <tdawson@redhat.com> 3.4.10-1 +- Get rid of openshift_node_config_file entirely (sdodson@redhat.com) +- [logging] Fix NFS volume binding (sdodson@redhat.com) +- Build full node config path in systemd_units tasks. (abutcher@redhat.com) +- Default [] (abutcher@afrolegs.com) +- Template with_items for upstream ansible-2.2 compat. (abutcher@redhat.com) + +* Mon Oct 17 2016 Troy Dawson <tdawson@redhat.com> 3.4.9-1 +- formatting updates in template (tobias@tobru.ch) +- Do not error on node labels set too non-string values. (manuel@hutter.io) +- Use inventory variables rather than facts (sdodson@redhat.com) +- Resume restarting node after upgrading node rpms. (dgoodwin@redhat.com) +- upgrade: Don't check avail docker version if not already installed. + (dgoodwin@redhat.com) +- revise docs (tobias@tobru.ch) +- adjustments in docs and j2 template (tobias@tobru.ch) +- add regionendpoint parameter for registry s3 (tobias.brunner@vshn.ch) + * Fri Oct 14 2016 Troy Dawson <tdawson@redhat.com> 3.4.8-1 - update handling of use_dnsmasq (jdetiber@redhat.com) - Fix standalone docker upgrade playbook skipping nodes. (dgoodwin@redhat.com) diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml index 79cae24ab..32fc7ce68 100644 --- a/playbooks/adhoc/noc/get_zabbix_problems.yml +++ b/playbooks/adhoc/noc/get_zabbix_problems.yml @@ -33,7 +33,7 @@ - add_host: name: "{{ item }}" groups: problem_hosts_group - with_items: problem_hosts + with_items: "{{ problem_hosts }}" - name: "Run on problem hosts" hosts: problem_hosts_group diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index a542b4ca3..4934ae6d0 100644 --- a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -16,7 +16,7 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) + with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - name: List Hosts hosts: oo_list_hosts diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index d22c86cda..4d76d3bfe 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -99,8 +99,8 @@ - name: Add Name tag to instances ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present with_together: - - instances - - ec2.instances + - "{{ instances }}" + - "{{ ec2.instances }}" args: tags: Name: "{{ item.0 }}" @@ -154,8 +154,8 @@ openshift_node_labels: "{{ node_label }}" logrotate_scripts: "{{ logrotate }}" with_together: - - instances - - ec2.instances + - "{{ instances }}" + - "{{ ec2.instances }}" - name: Add new instances to nodes_to_add group if needed add_host: @@ -169,13 +169,13 @@ openshift_node_labels: "{{ node_label }}" logrotate_scripts: "{{ logrotate }}" with_together: - - instances - - ec2.instances + - "{{ instances }}" + - "{{ ec2.instances }}" when: oo_extend_env is defined and oo_extend_env | bool - name: Wait for ssh wait_for: "port=22 host={{ item.dns_name }}" - with_items: ec2.instances + with_items: "{{ ec2.instances }}" - name: Wait for user 
setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup" @@ -184,5 +184,5 @@ retries: 20 delay: 10 with_together: - - instances - - ec2.instances + - "{{ instances }}" + - "{{ ec2.instances }}" diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index fb13e1839..7a8375d0e 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -12,7 +12,7 @@ groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) + with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}" - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -39,7 +39,7 @@ clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}" host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}" sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}" - with_items: groups.oo_hosts_to_terminate + with_items: "{{ groups.oo_hosts_to_terminate }}" when: "'oo_hosts_to_terminate' in groups" - name: Terminate instances @@ -49,7 +49,7 @@ region: "{{ hostvars[item].ec2_region }}" ignore_errors: yes register: ec2_term - with_items: groups.oo_hosts_to_terminate + with_items: "{{ groups.oo_hosts_to_terminate }}" when: "'oo_hosts_to_terminate' in groups" # Fail if any of the instances failed to terminate with an error other @@ -57,7 +57,7 @@ - fail: msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}" when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" - with_items: ec2_term.results + with_items: "{{ ec2_term.results }}" - name: Stop instance if termination failed ec2: @@ -66,12 +66,12 @@ region: "{{ item.item.ec2_region }}" register: ec2_stop when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" - with_items: ec2_term.results + with_items: "{{ ec2_term.results }}" - name: Rename stopped instances ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present args: tags: Name: "{{ item.item.item.ec2_tag_Name }}-terminate" - with_items: ec2_stop.results + with_items: "{{ ec2_stop.results }}" when: ec2_stop | changed diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml index d762203b2..ed05d61ed 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -7,7 +7,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no @@ -27,7 +27,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml index 9be6becc1..834461e14 100644 --- 
a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -25,13 +25,13 @@ tasks: - name: Prepare for Node evacuation command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade - name: Evacuate Node for Kubelet upgrade command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade @@ -40,7 +40,7 @@ - name: Set node schedulability command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: openshift.node.schedulable | bool when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml index 1755203a4..a3ab78ccf 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -10,7 +10,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" changed_when: false - hosts: l_oo_all_hosts diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml index 5d549eee7..d92761e48 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml @@ -10,7 +10,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 0cf669ae3..0a163526a 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -8,7 +8,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index fced79262..279eeab21 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -8,7 +8,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index 5737bb0e0..902221931 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -8,7 +8,7 @@ - 
add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index f093411ef..f36caeb36 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -8,7 +8,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml index 26b31d313..825f46415 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -11,6 +11,8 @@ - role: openshift_examples registry_url: "{{ openshift.master.registry_url }}" when: openshift.common.install_examples | bool + - role: openshift_hosted_templates + registry_url: "{{ openshift.master.registry_url }}" - role: openshift_manageiq when: openshift.common.use_manageiq | bool - role: cockpit diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 3cc23f9c1..352d266a5 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -56,13 +56,13 @@ openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}" openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}" openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}" - openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else 'false' }}" - openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) is not none else '' }}" + openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}" - openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else 'false' }}" - openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) is not none else '' }}" + openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | 
default(none) in ['dynamic','nfs' ] else '' }}" + openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) =='dynamic' else '' }}" - role: cockpit-ui when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml index 4996c56a7..5f008a045 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml @@ -224,7 +224,7 @@ - name: Prepare for node evacuation command: > - {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig manage-node {{ openshift.node.nodename }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" @@ -232,7 +232,7 @@ - name: Evacuate node command: > - {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig manage-node {{ openshift.node.nodename }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" @@ -240,7 +240,7 @@ - name: Set node schedulability command: > - {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig manage-node {{ openshift.node.nodename }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml index 32a3636aa..439df5ffd 100644 --- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml @@ -1,5 +1,3 @@ -- include_vars: ../../../../roles/openshift_node/vars/main.yml - - name: Update systemd units include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }} diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml index 78f6c46f3..23cf8cf76 100644 --- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml +++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml @@ -22,11 +22,11 @@ - name: Create service signer certificate command: > - {{ openshift.common.admin_binary }} ca create-signer-cert - --cert=service-signer.crt - --key=service-signer.key - --name=openshift-service-serving-signer - --serial=service-signer.serial.txt + {{ openshift.common.client_binary }} adm ca create-signer-cert + --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt + --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key + --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer + --serial="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.serial.txt args: 
chdir: "{{ remote_cert_create_tmpdir.stdout }}/" when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool) diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml index 46ff421fd..ee75aa853 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml @@ -36,7 +36,7 @@ - set_fact: l_docker_upgrade: False -# Make sure a docker_verison is set if none was requested: +# Make sure a docker_version is set if none was requested: - set_fact: docker_version: "{{ avail_docker_version.stdout }}" when: pkg_check.rc == 0 and docker_version is not defined diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index f3b3abe0d..fbdb7900a 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -10,7 +10,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index e43954453..2bbcbe1f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -17,10 +17,14 @@ # not already exist. We could have potentially done a replace --force to # create and update in one step. - openshift_examples + - openshift_hosted_templates # Update the existing templates - role: openshift_examples registry_url: "{{ openshift.master.registry_url }}" openshift_examples_import_command: replace + - role: openshift_hosted_templates + registry_url: "{{ openshift.master.registry_url }}" + openshift_hosted_templates_import_command: replace pre_tasks: - name: Collect all routers command: > @@ -41,7 +45,7 @@ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' --api-version=v1 - with_items: haproxy_routers + with_items: "{{ haproxy_routers }}" - name: Check for default registry command: > diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index c80e9e74d..927d9b4ca 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -11,7 +11,7 @@ add_host: name: "{{ item }}" groups: etcd_hosts_to_backup - with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - name: Backup etcd hosts: etcd_hosts_to_backup @@ -197,19 +197,15 @@ # restart. 
skip_docker_role: True tasks: - - name: Verifying the correct commandline tools are available - shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} - when: openshift.common.is_containerized | bool and verify_upgrade_version is defined - - name: Reconcile Cluster Roles command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --additive-only=true --confirm run_once: true - name: Reconcile Cluster Role Bindings command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings --exclude-groups=system:authenticated --exclude-groups=system:authenticated:oauth @@ -221,7 +217,7 @@ - name: Reconcile Security Context Constraints command: > - {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true + {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true run_once: true - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 9b572dcdf..1f314c854 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -29,7 +29,7 @@ - name: Mark unschedulable if host is a node command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_upgrade # NOTE: There is a transient "object has been modified" error here, allow a couple @@ -41,7 +41,7 @@ - name: Evacuate Node for Kubelet upgrade command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_upgrade tasks: @@ -64,7 +64,7 @@ - name: Set node schedulability command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool register: node_sched diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml index fd2bc24ae..f460612ba 100644 --- a/playbooks/common/openshift-etcd/service.yml +++ b/playbooks/common/openshift-etcd/service.yml @@ -10,7 +10,7 @@ - name: Evaluate g_service_etcd add_host: name={{ item }} groups=g_service_etcd - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change etcd state on etcd instance(s) hosts: g_service_etcd diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml index e06a14c89..efc80edf9 100644 --- 
a/playbooks/common/openshift-loadbalancer/service.yml +++ b/playbooks/common/openshift-loadbalancer/service.yml @@ -10,7 +10,7 @@ - name: Evaluate g_service_lb add_host: name={{ item }} groups=g_service_lb - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change state on lb instance(s) hosts: g_service_lb diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 57a63cfee..5769ef5cd 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -66,63 +66,8 @@ current_host: "{{ exists.stat.exists }}" when: openshift.common.rolling_restart_mode == 'system' -- name: Determine which masters are currently active - hosts: oo_masters_to_config - any_errors_fatal: true - tasks: - - name: Check master service status - command: > - systemctl is-active {{ openshift.common.service_type }}-master - register: active_check_output - when: openshift.master.cluster_method | default(None) == 'pacemaker' - failed_when: false - changed_when: false - - set_fact: - is_active: "{{ active_check_output.stdout == 'active' }}" - when: openshift.master.cluster_method | default(None) == 'pacemaker' - -- name: Evaluate master groups - hosts: localhost - become: no - tasks: - - fail: - msg: > - Did not receive active status from any masters. Please verify pacemaker cluster. - when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('is_active') - | list) }}" - - name: Evaluate oo_active_masters - add_host: - name: "{{ item }}" - groups: oo_active_masters - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ groups.oo_masters_to_config | default([]) }}" - when: (hostvars[item]['is_active'] | default(false)) | bool - - name: Evaluate oo_current_masters - add_host: - name: "{{ item }}" - groups: oo_current_masters - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ groups.oo_masters_to_config | default([]) }}" - when: (hostvars[item]['current_host'] | default(false)) | bool - -- name: Validate pacemaker cluster - hosts: oo_active_masters - tasks: - - name: Retrieve pcs status - command: pcs status - register: pcs_status_output - changed_when: false - - fail: - msg: > - Pacemaker cluster validation failed. One or more nodes are not online. 
- when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool - - name: Restart masters - hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters + hosts: oo_masters_to_config vars: openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 @@ -132,20 +77,3 @@ - include: restart_services.yml when: openshift.common.rolling_restart_mode == 'services' -- name: Restart active masters - hosts: oo_active_masters - serial: 1 - tasks: - - include: restart_hosts_pacemaker.yml - when: openshift.common.rolling_restart_mode == 'system' - - include: restart_services_pacemaker.yml - when: openshift.common.rolling_restart_mode == 'services' - -- name: Restart current masters - hosts: oo_current_masters - serial: 1 - tasks: - - include: restart_hosts.yml - when: openshift.common.rolling_restart_mode == 'system' - - include: restart_services.yml - when: openshift.common.rolling_restart_mode == 'services' diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml index ff206f5a2..b1c36718c 100644 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -5,8 +5,8 @@ poll: 0 ignore_errors: true become: yes -# When cluster_method != pacemaker we can ensure the api_port is -# available. + +# Ensure the api_port is available. - name: Wait for master API to come back online become: no local_action: @@ -15,25 +15,3 @@ state=started delay=10 port="{{ openshift.master.api_port }}" - when: openshift.master.cluster_method != 'pacemaker' -- name: Wait for master to start - become: no - local_action: - module: wait_for - host="{{ inventory_hostname }}" - state=started - delay=10 - port=22 - when: openshift.master.cluster_method == 'pacemaker' -- name: Wait for master to become available - command: pcs status - register: pcs_status_output - until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool - retries: 15 - delay: 2 - changed_when: false - when: openshift.master.cluster_method == 'pacemaker' -- fail: - msg: > - Pacemaker cluster validation failed {{ inventory hostname }} is not online. 
- when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml deleted file mode 100644 index c9219e8de..000000000 --- a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml +++ /dev/null @@ -1,25 +0,0 @@ -- name: Fail over master resource - command: > - pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }} -- name: Wait for master API to come back online - become: no - local_action: - module: wait_for - host="{{ openshift.master.cluster_hostname }}" - state=started - delay=10 - port="{{ openshift.master.api_port }}" -- name: Restart master system - # https://github.com/ansible/ansible/issues/10616 - shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" - async: 1 - poll: 0 - ignore_errors: true - become: yes -- name: Wait for master to start - become: no - local_action: - module: wait_for - host="{{ inventory_hostname }}" - state=started - delay=10 diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml deleted file mode 100644 index e738f3fb6..000000000 --- a/playbooks/common/openshift-master/restart_services_pacemaker.yml +++ /dev/null @@ -1,10 +0,0 @@ -- name: Restart master services - command: pcs resource restart master -- name: Wait for master API to come back online - become: no - local_action: - module: wait_for - host="{{ openshift.master.cluster_hostname }}" - state=started - delay=10 - port="{{ openshift.master.api_port }}" diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml index f60c5a2b5..5e5198335 100644 --- a/playbooks/common/openshift-master/service.yml +++ b/playbooks/common/openshift-master/service.yml @@ -10,7 +10,7 @@ - name: Evaluate g_service_masters add_host: name={{ item }} groups=g_service_masters - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change state on master instance(s) hosts: g_service_masters diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml index 20c8ca248..8468014da 100644 --- a/playbooks/common/openshift-nfs/service.yml +++ b/playbooks/common/openshift-nfs/service.yml @@ -8,7 +8,7 @@ - name: Evaluate g_service_nfs add_host: name={{ item }} groups=g_service_nfs - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change state on nfs instance(s) hosts: g_service_nfs diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml index 0f07add2a..33095c9fb 100644 --- a/playbooks/common/openshift-node/service.yml +++ b/playbooks/common/openshift-node/service.yml @@ -10,7 +10,7 @@ - name: Evaluate g_service_nodes add_host: name={{ item }} groups=g_service_nodes - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change state on node instance(s) hosts: g_service_nodes diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index c29cac272..34dcd2496 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -16,7 
+16,7 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) + with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}" - name: List Hosts hosts: oo_list_hosts diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index 60cf21a5b..7c8189224 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -49,11 +49,11 @@ gce_public_ip: "{{ item.public_ip }}" gce_private_ip: "{{ item.private_ip }}" openshift_node_labels: "{{ node_label }}" - with_items: gce.instance_data | default([], true) + with_items: "{{ gce.instance_data | default([], true) }}" - name: Wait for ssh wait_for: port=22 host={{ item.public_ip }} - with_items: gce.instance_data | default([], true) + with_items: "{{ gce.instance_data | default([], true) }}" - name: Wait for user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" @@ -61,4 +61,4 @@ until: result.rc == 0 retries: 30 delay: 5 - with_items: gce.instance_data | default([], true) + with_items: "{{ gce.instance_data | default([], true) }}" diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index 6a0ac088a..68e60f9d4 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -12,7 +12,7 @@ groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) + with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}" - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -43,7 +43,7 @@ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" project_id: "{{ lookup('env', 'gce_project_id') }}" zone: "{{ lookup('env', 'zone') }}" - with_items: groups['oo_hosts_to_terminate'] | default([], true) + with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}" when: item is defined #- include: ../openshift-node/terminate.yml diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 332f27da7..6d2af3d26 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -7,7 +7,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no @@ -27,7 +27,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/libvirt/openshift-cluster/list.yml 
b/playbooks/libvirt/openshift-cluster/list.yml index eb64544db..86d5d0aad 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -16,7 +16,7 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) + with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - name: List Hosts hosts: oo_list_hosts diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml index df5c52f2d..81e6d8f05 100644 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ b/playbooks/libvirt/openshift-cluster/terminate.yml @@ -1,5 +1,5 @@ --- -# TODO: does not handle a non-existant cluster gracefully +# TODO: does not handle a non-existent cluster gracefully - name: Terminate instance(s) hosts: localhost diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 755090f94..20ce47c07 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -45,7 +45,7 @@ parameters: node_port_incoming: type: string label: Source of node port connections - description: Authorized sources targetting node ports + description: Authorized sources targeting node ports default: 0.0.0.0/0 num_etcd: diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml index 332f27da7..6d2af3d26 100644 --- a/playbooks/openstack/openshift-cluster/update.yml +++ b/playbooks/openstack/openshift-cluster/update.yml @@ -7,7 +7,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no @@ -27,7 +27,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/roles/docker/tasks/udev_workaround.yml b/roles/docker/tasks/udev_workaround.yml index 3c236f698..aa7af0cb3 100644 --- a/roles/docker/tasks/udev_workaround.yml +++ b/roles/docker/tasks/udev_workaround.yml @@ -14,7 +14,7 @@ copy: content: | [Service] - #Need blank ExecStart to "clear" pre-exising one + #Need blank ExecStart to "clear" pre-existing one ExecStart= {{ udevw_udev_start_cmd.stdout }} --event-timeout=300 dest: "{{ udevw_udevd_dir }}/override.conf" diff --git a/roles/etcd_common/library/delegated_serial_command.py b/roles/etcd_common/library/delegated_serial_command.py index 3969edfdd..84d4f97c2 100755 --- a/roles/etcd_common/library/delegated_serial_command.py +++ b/roles/etcd_common/library/delegated_serial_command.py @@ -270,6 +270,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -from ansible.module_utils.splitter import * main() diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py index 1ac8eed4d..9bd3228c1 100644 --- a/roles/kube_nfs_volumes/library/partitionpool.py +++ b/roles/kube_nfs_volumes/library/partitionpool.py @@ -60,7 +60,7 @@ options: - Example 3: size=200G:1,100G:2 says that the ratio of space 
occupied by 200 GiB partitions and 100GiB partition is 1:2. Therefore, on 1 TiB disk, 1/3 (300 GiB) should be occupied by 200 GiB partitions. Only one fits there, - so only one is created (we always round nr. of partitions *down*). Teh rest + so only one is created (we always round nr. of partitions *down*). The rest (800 GiB) is split into eight 100 GiB partitions, even though it's more than 2/3 of total space - free space is always allocated as much as possible. - size=200G:1,100G:2 = 1x 200 GiB and 8x 100 GiB partitions (on 1 TiB disk). diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml index 5432a5e2f..5eff30f6f 100644 --- a/roles/kube_nfs_volumes/tasks/main.yml +++ b/roles/kube_nfs_volumes/tasks/main.yml @@ -12,11 +12,11 @@ - name: create filesystem filesystem: fstype=ext4 dev=/dev/{{ item.name }} - with_items: partition_pool + with_items: "{{ partition_pool }}" - name: mount mount: name={{mount_dir}}/{{ item.name }} src=/dev/{{ item.name }} state=mounted fstype=ext4 passno=2 - with_items: partition_pool + with_items: "{{ partition_pool }}" - include: nfs.yml @@ -28,4 +28,4 @@ body_format: json status_code: 201 HEADER_Authorization: "Bearer {{ kubernetes_token }}" - with_items: partition_pool + with_items: "{{ partition_pool }}" diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml index 9a68ceb8d..474ec69e5 100644 --- a/roles/kube_nfs_volumes/tasks/nfs.yml +++ b/roles/kube_nfs_volumes/tasks/nfs.yml @@ -13,5 +13,5 @@ lineinfile: dest=/etc/exports regexp="^{{ mount_dir }}/{{ item.name }} " line="{{ mount_dir }}/{{ item.name }} {{nfs_export_options}}" - with_items: partition_pool + with_items: "{{ partition_pool }}" notify: restart nfs diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml index e2c51a903..1979c851f 100644 --- a/roles/nickhammond.logrotate/tasks/main.yml +++ b/roles/nickhammond.logrotate/tasks/main.yml @@ -7,5 +7,5 @@ template: src: logrotate.d.j2 dest: /etc/logrotate.d/{{ item.name }} - with_items: logrotate_scripts + with_items: "{{ logrotate_scripts | default([]) }}" when: logrotate_scripts is defined diff --git a/roles/nuage_common/defaults/main.yaml b/roles/nuage_common/defaults/main.yaml index 9b777213e..16dac8720 100644 --- a/roles/nuage_common/defaults/main.yaml +++ b/roles/nuage_common/defaults/main.yaml @@ -10,4 +10,4 @@ nuage_master_mon_dir: /usr/share/nuage-openshift-monitor nuage_node_plugin_dir: /usr/share/vsp-openshift nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}" - +nuage_mon_cert_validity_period: "{{ nuage_cert_validity_period | default('3650') }}" diff --git a/roles/nuage_master/tasks/certificates.yml b/roles/nuage_master/tasks/certificates.yml index 32b024487..0a2f375cd 100644 --- a/roles/nuage_master/tasks/certificates.yml +++ b/roles/nuage_master/tasks/certificates.yml @@ -15,7 +15,7 @@ - name: Generate the crt file command: > - openssl x509 -req -in "{{ nuage_mon_rest_server_crt_dir }}/restServer.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_rest_server_crt }}" + openssl x509 -req -in "{{ nuage_mon_rest_server_crt_dir }}/restServer.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_rest_server_crt }}" -days "{{ nuage_mon_cert_validity_period }}" delegate_to: "{{ nuage_ca_master }}" - name: Remove the req file diff --git 
a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml index 5b4af5824..2b3ae0454 100644 --- a/roles/nuage_master/tasks/serviceaccount.yml +++ b/roles/nuage_master/tasks/serviceaccount.yml @@ -16,7 +16,7 @@ shell: > echo {{ nuage_service_account_config | to_json | quote }} | {{ openshift.common.client_binary }} create - -n default + -n default --config={{nuage_tmp_conf}} -f - register: osnuage_create_service_account @@ -25,7 +25,7 @@ - name: Configure role/user permissions command: > - {{ openshift.common.admin_binary }} {{item}} + {{ openshift.common.client_binary }} adm {{item}} --config={{nuage_tmp_conf}} with_items: "{{nuage_tasks}}" register: osnuage_perm_task @@ -34,7 +34,7 @@ - name: Generate the node client config command: > - {{ openshift.common.admin_binary }} create-api-client-config + {{ openshift.common.client_binary }} adm create-api-client-config --certificate-authority={{ openshift_master_ca_cert }} --client-dir={{ cert_output_dir }} --master={{ openshift.master.api_url }} diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2 index 63117adc0..de2a97e37 100644 --- a/roles/nuage_master/templates/nuage-openshift-monitor.j2 +++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2 @@ -23,7 +23,7 @@ enterpriseAdminUser: {{ nuage_master_adminusername }} enterpriseAdminPassword: {{ nuage_master_adminuserpasswd }} # Location where logs should be saved log_dir: {{ nuage_mon_rest_server_logdir }} -# Monitor rest server paramters +# Monitor rest server parameters # Logging level for the nuage openshift monitor # allowed options are: 0 => INFO, 1 => WARNING, 2 => ERROR, 3 => FATAL logLevel: {{ nuage_mon_log_level }} diff --git a/roles/nuage_node/tasks/certificates.yml b/roles/nuage_node/tasks/certificates.yml index 0fe6f7bac..7fcd4274d 100644 --- a/roles/nuage_node/tasks/certificates.yml +++ b/roles/nuage_node/tasks/certificates.yml @@ -15,7 +15,7 @@ - name: Generate the crt file command: > - openssl x509 -req -in "{{ nuage_plugin_rest_client_crt_dir }}/restClient.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_plugin_crt }}" -extensions clientauth -extfile "{{ nuage_ca_dir }}"/openssl.cnf + openssl x509 -req -in "{{ nuage_plugin_rest_client_crt_dir }}/restClient.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_plugin_crt }}" -extensions clientauth -extfile "{{ nuage_ca_dir }}"/openssl.cnf -days {{ nuage_mon_cert_validity_period }} delegate_to: "{{ nuage_ca_master }}" - name: Remove the req file diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index bb89b65a6..b6d403067 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -80,7 +80,7 @@ - name: Create the master certificates if they do not already exist command: > - {{ openshift.common.admin_binary }} create-master-certs + {{ openshift.common.client_binary }} adm create-master-certs {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md new file mode 100644 index 000000000..d44438332 --- /dev/null +++ b/roles/openshift_certificate_expiry/README.md @@ -0,0 +1,250 @@ +OpenShift Certificate Expiration 
Checker +======================================== + +OpenShift certificate expiration checking. Be warned of certificates +expiring within a configurable window of days, and notified of +certificates which have already expired. Certificates examined +include: + +* Master/Node Service Certificates +* Router/Registry Service Certificates from etcd secrets +* Master/Node/Router/Registry/Admin `kubeconfig`s +* Etcd certificates + +This role pairs well with the redeploy certificates playbook: + +* [Redeploying Certificates Documentation](https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html) + +Just like the redeploying certificates playbook, this role is intended +to be used with an inventory that is representative of the +cluster. For best results run `ansible-playbook` with the `-v` option. + + + +Role Variables +-------------- + +Core variables in this role: + +| Name | Default value | Description | +|-------------------------------------------------------|--------------------------------|-----------------------------------------------------------------------| +| `openshift_certificate_expiry_config_base` | `/etc/origin` | Base openshift config directory | +| `openshift_certificate_expiry_warning_days` | `30` | Flag certificates which will expire in this many days from now | +| `openshift_certificate_expiry_show_all` | `no` | Include healthy (non-expired and non-warning) certificates in results | + +Optional report/result saving variables in this role: + +| Name | Default value | Description | +|-------------------------------------------------------|--------------------------------|-----------------------------------------------------------------------| +| `openshift_certificate_expiry_generate_html_report` | `no` | Generate an HTML report of the expiry check results | +| `openshift_certificate_expiry_html_report_path` | `/tmp/cert-expiry-report.html` | The full path to save the HTML report as | +| `openshift_certificate_expiry_save_json_results` | `no` | Save expiry check results as a json file | +| `openshift_certificate_expiry_json_results_path` | `/tmp/cert-expiry-report.json` | The full path to save the json report as | + + +Example Playbook +---------------- + +Default behavior: + +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + roles: + - role: openshift_certificate_expiry +``` + +Generate HTML and JSON artifacts in their default paths: + +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_save_json_results: yes + roles: + - role: openshift_certificate_expiry +``` + +Change the expiration warning window to 1500 days (good for testing +the module out): + +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_warning_days: 1500 + roles: + - role: openshift_certificate_expiry +``` + +Change the expiration warning window to 1500 days (good for testing +the module out) and save the results as a JSON file: + +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_warning_days: 1500 + openshift_certificate_expiry_save_json_results: yes + roles: + - role: openshift_certificate_expiry +``` + + +JSON Output +----------- + +There are two top-level keys in the saved JSON results, 
`data` and +`summary`. + +The `data` key is a hash where the keys are the names of each host +examined and the values are the check results for each respective +host. + +The `summary` key is a hash that summarizes the number of certificates +expiring within the configured warning window and the number of +already expired certificates. + +The example below is abbreviated to save space: + +```json +{ + "data": { + "192.168.124.148": { + "etcd": [ + { + "cert_cn": "CN:etcd-signer@1474563722", + "days_remaining": 350, + "expiry": "2017-09-22 17:02:25", + "health": "warning", + "path": "/etc/etcd/ca.crt" + }, + ], + "kubeconfigs": [ + { + "cert_cn": "O:system:nodes, CN:system:node:m01.example.com", + "days_remaining": 715, + "expiry": "2018-09-22 17:08:57", + "health": "warning", + "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig" + }, + { + "cert_cn": "O:system:cluster-admins, CN:system:admin", + "days_remaining": 715, + "expiry": "2018-09-22 17:04:40", + "health": "warning", + "path": "/etc/origin/master/admin.kubeconfig" + } + ], + "meta": { + "checked_at_time": "2016-10-07 15:26:47.608192", + "show_all": "True", + "warn_before_date": "2020-11-15 15:26:47.608192", + "warning_days": 1500 + }, + "ocp_certs": [ + { + "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148", + "days_remaining": 715, + "expiry": "2018-09-22 17:04:39", + "health": "warning", + "path": "/etc/origin/master/master.server.crt" + }, + { + "cert_cn": "CN:openshift-signer@1474563878", + "days_remaining": 1810, + "expiry": "2021-09-21 17:04:38", + "health": "ok", + "path": "/etc/origin/node/ca.crt" + } + ], + "registry": [ + { + "cert_cn": "CN:172.30.101.81, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.101.81, IP Address:172.30.101.81", + "days_remaining": 728, + "expiry": "2018-10-05 18:54:29", + "health": "warning", + "path": "/api/v1/namespaces/default/secrets/registry-certificates" + } + ], + "router": [ + { + "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local", + "days_remaining": 715, + "expiry": "2018-09-22 17:48:23", + "health": "warning", + "path": "/api/v1/namespaces/default/secrets/router-certs" + } + ] + } + }, + "summary": { + "warning": 6, + "expired": 0 + } +} +``` + +The `summary` from the json data can be easily checked for +warnings/expirations using a variety of command-line tools. + +For example, using `grep` we can look for the word `summary` and print +out the 2 lines **after** the match (`-A2`): + +``` +$ grep -A2 summary /tmp/cert-expiry-report.json + "summary": { + "warning": 16, + "expired": 0 +``` + +If available, the [jq](https://stedolan.github.io/jq/) tool can also +be used to pick out specific values. Examples 1 and 2 below show how to +select just one value, either `warning` or `expired`. 
Example 3 shows +how to select both values at once: + +``` +$ jq '.summary.warning' /tmp/cert-expiry-report.json +16 +$ jq '.summary.expired' /tmp/cert-expiry-report.json +0 +$ jq '.summary.warning,.summary.expired' /tmp/cert-expiry-report.json +16 +0 +``` + + +Requirements +------------ + +* None + + +Dependencies +------------ + +* None + + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Tim Bielawa (tbielawa@redhat.com) diff --git a/roles/openshift_certificate_expiry/defaults/main.yml b/roles/openshift_certificate_expiry/defaults/main.yml new file mode 100644 index 000000000..6d7b19298 --- /dev/null +++ b/roles/openshift_certificate_expiry/defaults/main.yml @@ -0,0 +1,8 @@ +--- +openshift_certificate_expiry_config_base: "/etc/origin" +openshift_certificate_expiry_warning_days: 30 +openshift_certificate_expiry_show_all: no +openshift_certificate_expiry_generate_html_report: no +openshift_certificate_expiry_html_report_path: "/tmp/cert-expiry-report.html" +openshift_certificate_expiry_save_json_results: no +openshift_certificate_expiry_json_results_path: "/tmp/cert-expiry-report.json" diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py new file mode 100644 index 000000000..2e2430ee6 --- /dev/null +++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py @@ -0,0 +1,88 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 +""" +Custom filters for use in openshift-ansible +""" + +from ansible import errors +from collections import Mapping +from distutils.util import strtobool +from distutils.version import LooseVersion +from operator import itemgetter +import OpenSSL.crypto +import os +import pdb +import pkg_resources +import re +import json +import yaml +from ansible.parsing.yaml.dumper import AnsibleDumper +from urlparse import urlparse + +try: + # ansible-2.2 + # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2, + # ansible.module_utils._text.to_text should be used instead. + from ansible.module_utils._text import to_text +except ImportError: + # ansible-2.1 + from ansible.utils.unicode import to_unicode as to_text + +# Disabling too-many-public-methods, since filter methods are necessarily +# public +# pylint: disable=too-many-public-methods +class FilterModule(object): + """ Custom ansible filters """ + + @staticmethod + def oo_cert_expiry_results_to_json(hostvars, play_hosts): + """Takes results (`hostvars`) from the openshift_cert_expiry role +check and serializes them into proper machine-readable JSON +output. This filter parameter **MUST** be the playbook `hostvars` +variable. The `play_hosts` parameter is so we know what to loop over +when we're extracting the values. 
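As a complement to the `grep` and `jq` one-liners shown earlier, a minimal Python sketch of consuming the JSON this filter produces might look like the following; the path is the role's default `openshift_certificate_expiry_json_results_path`, and failing on any expired certificate is only an example policy, not something the role enforces:

```python
# Minimal sketch: read the saved report and turn the summary into an
# exit status. The "fail on expired" rule below is an example policy.
import json
import sys

with open('/tmp/cert-expiry-report.json') as f:
    report = json.load(f)

warning = report['summary']['warning']
expired = report['summary']['expired']
print('warning: {0}, expired: {1}'.format(warning, expired))
sys.exit(1 if expired > 0 else 0)
```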
+ +Returns: + +Results are collected into two top-level keys under the `json_results` +dict: + +* `json_results.data` [dict] - Each individual host check result, keys are hostnames +* `json_results.summary` [dict] - Summary of number of `warning` and `expired` +certificates + +Example playbook usage: + + - name: Generate expiration results JSON + become: no + run_once: yes + delegate_to: localhost + when: "{{ openshift_certificate_expiry_save_json_results|bool }}" + copy: + content: "{{ hostvars|oo_cert_expiry_results_to_json() }}" + dest: "{{ openshift_certificate_expiry_json_results_path }}" + + """ + json_result = { + 'data': {}, + 'summary': {}, + } + + for host in play_hosts: + json_result['data'][host] = hostvars[host]['check_results']['check_results'] + + total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts]) + total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts]) + + json_result['summary']['warning'] = total_warnings + json_result['summary']['expired'] = total_expired + + return json_result + + + def filters(self): + """ returns a mapping of filters to methods """ + return { + "oo_cert_expiry_results_to_json": self.oo_cert_expiry_results_to_json, + } diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py new file mode 100644 index 000000000..2cdb87dc1 --- /dev/null +++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py @@ -0,0 +1,637 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# pylint: disable=line-too-long,invalid-name + +"""For details on this module see DOCUMENTATION (below)""" + +# router/registry cert grabbing +import subprocess +# etcd config file +import ConfigParser +# Expiration parsing +import datetime +# File path stuff +import os +# Config file parsing +import yaml +# Certificate loading +import OpenSSL.crypto + +DOCUMENTATION = ''' +--- +module: openshift_cert_expiry +short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster +description: + - The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired. + - When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following: + - C(ok) - not expired, and outside of the expiration C(warning_days) window. + - C(warning) - not expired, but will expire between now and the C(warning_days) window. + - C(expired) - an expired certificate. + - Certificate flagging follows this logic: + - If the expiration date is before now then the certificate is classified as C(expired). + - The certificate's time to live (expiration date - now) is calculated; if that time window is less than C(warning_days) the certificate is classified as C(warning). + - All other conditions are classified as C(ok). + - The following keys are ALSO present in the certificate summary: + - C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted) + - C(days_remaining) - The number of days until the certificate expires. + - C(expiry) - The date the certificate expires on. + - C(path) - The full path to the certificate on the examined host. +version_added: "1.0" +options: + config_base: + description: + - Base path to OCP system settings. 
+ required: false + default: /etc/origin + warning_days: + description: + - Flag certificates which will expire in C(warning_days) days from now. + required: false + default: 30 + show_all: + description: + - Enable this option to show analysis of ALL certificates examined by this module. + - By default only certificates which have expired, or will expire within the C(warning_days) window will be reported. + required: false + default: false + +author: "Tim Bielawa (@tbielawa) <tbielawa@redhat.com>" +''' + +EXAMPLES = ''' +# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now +- openshift_cert_expiry: + +# Expand the warning window to show certificates expiring within a year from now +- openshift_cert_expiry: warning_days=365 + +# Show expired, soon to expire (now + 30 days), and all other certificates examined +- openshift_cert_expiry: show_all=true +''' + + +# We only need this for one thing, we don't care if it doesn't have +# that many public methods +# +# pylint: disable=too-few-public-methods +class FakeSecHead(object): + """etcd does not begin their config file with an opening [section] as +required by the Python ConfigParser module. We hack around it by +slipping one in ourselves prior to parsing. + +Source: Alex Martelli - http://stackoverflow.com/a/2819788/6490583 + """ + def __init__(self, fp): + self.fp = fp + self.sechead = '[ETCD]\n' + + def readline(self): + """Make this look like a file-type object""" + if self.sechead: + try: + return self.sechead + finally: + self.sechead = None + else: + return self.fp.readline() + + +###################################################################### +def filter_paths(path_list): + """`path_list` - A list of file paths to check. Only files which exist +will be returned + """ + return [p for p in path_list if os.path.exists(os.path.realpath(p))] + + +def load_and_handle_cert(cert_string, now, base64decode=False): + """Load a certificate, split off the good parts, and return some +useful data + +Params: + +- `cert_string` (string) - a certificate loaded into a string object +- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against +- `base64decode` (bool) - run .decode('base64') on the input? + +Returns: +A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certificate_time_remaining) + + """ + if base64decode: + _cert_string = cert_string.decode('base-64') + else: + _cert_string = cert_string + + cert_loaded = OpenSSL.crypto.load_certificate( + OpenSSL.crypto.FILETYPE_PEM, _cert_string) + + ###################################################################### + # Read all possible names from the cert + cert_subjects = [] + for name, value in cert_loaded.get_subject().get_components(): + cert_subjects.append('{}:{}'.format(name, value)) + + # To read SANs from a cert we must read the subjectAltName + # extension from the X509 Object. What makes this more difficult + # is that pyOpenSSL does not give extensions as a list, nor does + # it provide a count of all loaded extensions. + # + # Rather, extensions are REQUESTED by index. We must iterate over + # all extensions until we find the one called 'subjectAltName'. If + # we don't find that extension we'll eventually request an + # extension at an index where no extension exists (IndexError is + # raised). When that happens we know that the cert has no SANs so + # we break out of the loop. 
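Condensed into a standalone sketch (assuming pyOpenSSL is installed; the certificate path is only illustrative), the CN/SAN/expiry extraction described above looks roughly like this:

```python
# Condensed sketch of the same extraction: subject components, SANs found
# by indexing extensions until IndexError, and the notAfter expiry date.
# The certificate path below is an example, not a required location.
import datetime
import OpenSSL.crypto

with open('/etc/origin/master/ca.crt') as f:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read())

names = ['{0}:{1}'.format(k, v) for k, v in cert.get_subject().get_components()]

i = 0
while True:
    try:
        ext = cert.get_extension(i)
    except IndexError:
        break                                    # ran out of extensions, no SAN present
    if ext.get_short_name() == 'subjectAltName':
        names.extend(str(ext).split(', '))       # SANs stringify comma-separated
        break
    i += 1

expiry = datetime.datetime.strptime(cert.get_notAfter(), '%Y%m%d%H%M%SZ')
print('{0} expires {1}'.format(', '.join(names), expiry))
```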
+ i = 0 + checked_all_extensions = False + while not checked_all_extensions: + try: + # Read the extension at index 'i' + ext = cert_loaded.get_extension(i) + except IndexError: + # We tried to read an extension but it isn't there, that + # means we ran out of extensions to check. Abort + san = None + checked_all_extensions = True + else: + # We were able to load the extension at index 'i' + if ext.get_short_name() == 'subjectAltName': + san = ext + checked_all_extensions = True + else: + # Try reading the next extension + i += 1 + + if san is not None: + # The X509Extension object for subjectAltName prints as a + # string with the alt names separated by a comma and a + # space. Split the string by ', ' and then add our new names + # to the list of existing names + cert_subjects.extend(str(san).split(', ')) + + cert_subject = ', '.join(cert_subjects) + ###################################################################### + + # Grab the expiration date + cert_expiry = cert_loaded.get_notAfter() + cert_expiry_date = datetime.datetime.strptime( + cert_expiry, + # example get_notAfter() => 20180922170439Z + '%Y%m%d%H%M%SZ') + + time_remaining = cert_expiry_date - now + + return (cert_subject, cert_expiry_date, time_remaining) + + +def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list): + """Given metadata about a certificate under examination, classify it + into one of three categories, 'ok', 'warning', and 'expired'. + +Params: + +- `cert_meta` dict - A dict with certificate metadata. Required fields + include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'. +- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against +- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires +- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is +- `cert_list` list - A list to shove the classified cert into + +Return: +- `cert_list` - The updated list of classified certificates + """ + expiry_str = str(cert_meta['expiry']) + # Categorization + if cert_meta['expiry'] < now: + # This already expired, must NOTIFY + cert_meta['health'] = 'expired' + elif time_remaining < expire_window: + # WARN about this upcoming expirations + cert_meta['health'] = 'warning' + else: + # Not expired or about to expire + cert_meta['health'] = 'ok' + + cert_meta['expiry'] = expiry_str + cert_list.append(cert_meta) + return cert_list + + +def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs): + """Calculate the summary text for when the module finishes +running. This includes counts of each classification and what have +you. + +Params: + +- `certificates` (list of dicts) - Processed `expire_check_result` + dicts with filled in `health` keys for system certificates. +- `kubeconfigs` - as above for kubeconfigs +- `etcd_certs` - as above for etcd certs + +Return: + +- `summary_results` (dict) - Counts of each cert type classification + and total items examined. 
+ """ + items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs + + summary_results = { + 'system_certificates': len(certificates), + 'kubeconfig_certificates': len(kubeconfigs), + 'etcd_certificates': len(etcd_certs), + 'router_certs': len(router_certs), + 'registry_certs': len(registry_certs), + 'total': len(items), + 'ok': 0, + 'warning': 0, + 'expired': 0 + } + + summary_results['expired'] = len([c for c in items if c['health'] == 'expired']) + summary_results['warning'] = len([c for c in items if c['health'] == 'warning']) + summary_results['ok'] = len([c for c in items if c['health'] == 'ok']) + + return summary_results + + +###################################################################### +# This is our module MAIN function after all, so there's bound to be a +# lot of code bundled up into one block +# +# pylint: disable=too-many-locals,too-many-locals,too-many-statements,too-many-branches +def main(): + """This module examines certificates (in various forms) which compose +an OpenShift Container Platform cluster + """ + + module = AnsibleModule( + argument_spec=dict( + config_base=dict( + required=False, + default="/etc/origin", + type='str'), + warning_days=dict( + required=False, + default=30, + type='int'), + show_all=dict( + required=False, + default=False, + type='bool') + ), + supports_check_mode=True, + ) + + # Basic scaffolding for OpenShift specific certs + openshift_base_config_path = module.params['config_base'] + openshift_master_config_path = os.path.normpath( + os.path.join(openshift_base_config_path, "master/master-config.yaml") + ) + openshift_node_config_path = os.path.normpath( + os.path.join(openshift_base_config_path, "node/node-config.yaml") + ) + openshift_cert_check_paths = [ + openshift_master_config_path, + openshift_node_config_path, + ] + + # Paths for Kubeconfigs. Additional kubeconfigs are conditionally + # checked later in the code + master_kube_configs = ['admin', 'openshift-master', + 'openshift-node', 'openshift-router', + 'openshift-registry'] + + kubeconfig_paths = [] + for m_kube_config in master_kube_configs: + kubeconfig_paths.append( + os.path.normpath( + os.path.join(openshift_base_config_path, "master/%s.kubeconfig" % m_kube_config) + ) + ) + + # Validate some paths we have the ability to do ahead of time + openshift_cert_check_paths = filter_paths(openshift_cert_check_paths) + kubeconfig_paths = filter_paths(kubeconfig_paths) + + # etcd, where do you hide your certs? Used when parsing etcd.conf + etcd_cert_params = [ + "ETCD_CA_FILE", + "ETCD_CERT_FILE", + "ETCD_PEER_CA_FILE", + "ETCD_PEER_CERT_FILE", + ] + + # Expiry checking stuff + now = datetime.datetime.now() + # todo, catch exception for invalid input and return a fail_json + warning_days = int(module.params['warning_days']) + expire_window = datetime.timedelta(days=warning_days) + + # Module stuff + # + # The results of our cert checking to return from the task call + check_results = {} + check_results['meta'] = {} + check_results['meta']['warning_days'] = warning_days + check_results['meta']['checked_at_time'] = str(now) + check_results['meta']['warn_before_date'] = str(now + expire_window) + check_results['meta']['show_all'] = str(module.params['show_all']) + # All the analyzed certs accumulate here + ocp_certs = [] + + ###################################################################### + # Sure, why not? Let's enable check mode. 
+ if module.check_mode: + check_results['ocp_certs'] = [] + module.exit_json( + check_results=check_results, + msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'], + rc=0, + changed=False + ) + + ###################################################################### + # Check for OpenShift Container Platform specific certs + ###################################################################### + for os_cert in filter_paths(openshift_cert_check_paths): + # Open up that config file and locate the cert and CA + with open(os_cert, 'r') as fp: + cert_meta = {} + cfg = yaml.load(fp) + # cert files are specified in parsed `fp` as relative to the path + # of the original config file. 'master-config.yaml' with certFile + # = 'foo.crt' implies that 'foo.crt' is in the same + # directory. certFile = '../foo.crt' is in the parent directory. + cfg_path = os.path.dirname(fp.name) + cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile']) + cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA']) + + ###################################################################### + # Load the certificate and the CA, parse their expiration dates into + # datetime objects so we can manipulate them later + for _, v in cert_meta.iteritems(): + with open(v, 'r') as fp: + cert = fp.read() + cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs) + + ###################################################################### + # /Check for OpenShift Container Platform specific certs + ###################################################################### + + ###################################################################### + # Check service Kubeconfigs + ###################################################################### + kubeconfigs = [] + + # There may be additional kubeconfigs to check, but their naming + # is less predictable than the ones we've already assembled. + + try: + # Try to read the standard 'node-config.yaml' file to check if + # this host is a node. + with open(openshift_node_config_path, 'r') as fp: + cfg = yaml.load(fp) + + # OK, the config file exists, therefore this is a + # node. Nodes have their own kubeconfig files to + # communicate with the master API. Let's read the relative + # path to that file from the node config. 
+ node_masterKubeConfig = cfg['masterKubeConfig'] + # As before, the path to the 'masterKubeConfig' file is + # relative to `fp` + cfg_path = os.path.dirname(fp.name) + node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig) + + with open(node_kubeconfig, 'r') as fp: + # Read in the nodes kubeconfig file and grab the good stuff + cfg = yaml.load(fp) + + c = cfg['users'][0]['user']['client-certificate-data'] + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(c, now, base64decode=True) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) + except IOError: + # This is not a node + pass + + for kube in filter_paths(kubeconfig_paths): + with open(kube, 'r') as fp: + # TODO: Maybe consider catching exceptions here? + cfg = yaml.load(fp) + + # Per conversation, "the kubeconfigs you care about: + # admin, router, registry should all be single + # value". Following that advice we only grab the data for + # the user at index 0 in the 'users' list. There should + # not be more than one user. + c = cfg['users'][0]['user']['client-certificate-data'] + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(c, now, base64decode=True) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) + + ###################################################################### + # /Check service Kubeconfigs + ###################################################################### + + ###################################################################### + # Check etcd certs + ###################################################################### + # Some values may be duplicated, make this a set for now so we + # unique them all + etcd_certs_to_check = set([]) + etcd_certs = [] + etcd_cert_params.append('dne') + try: + with open('/etc/etcd/etcd.conf', 'r') as fp: + etcd_config = ConfigParser.ConfigParser() + etcd_config.readfp(FakeSecHead(fp)) + + for param in etcd_cert_params: + try: + etcd_certs_to_check.add(etcd_config.get('ETCD', param)) + except ConfigParser.NoOptionError: + # That parameter does not exist, oh well... + pass + except IOError: + # No etcd to see here, move along + pass + + for etcd_cert in filter_paths(etcd_certs_to_check): + with open(etcd_cert, 'r') as fp: + c = fp.read() + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(c, now) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) + + ###################################################################### + # /Check etcd certs + ###################################################################### + + ###################################################################### + # Check router/registry certs + # + # These are saved as secrets in etcd. That means that we can not + # simply read a file to grab the data. Instead we're going to + # subprocess out to the 'oc get' command. On non-masters this + # command will fail, that is expected so we catch that exception. 
+ ###################################################################### + router_certs = [] + registry_certs = [] + + ###################################################################### + # First the router certs + try: + router_secrets_raw = subprocess.Popen('oc get secret router-certs -o yaml'.split(), + stdout=subprocess.PIPE) + router_ds = yaml.load(router_secrets_raw.communicate()[0]) + router_c = router_ds['data']['tls.crt'] + router_path = router_ds['metadata']['selfLink'] + except TypeError: + # YAML couldn't load the result, this is not a master + pass + except OSError: + # The OC command doesn't exist here. Move along. + pass + else: + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(router_c, now, base64decode=True) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': router_path, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs) + + ###################################################################### + # Now for registry + try: + registry_secrets_raw = subprocess.Popen('oc get secret registry-certificates -o yaml'.split(), + stdout=subprocess.PIPE) + registry_ds = yaml.load(registry_secrets_raw.communicate()[0]) + registry_c = registry_ds['data']['registry.crt'] + registry_path = registry_ds['metadata']['selfLink'] + except TypeError: + # YAML couldn't load the result, this is not a master + pass + except OSError: + # The OC command doesn't exist here. Move along. + pass + else: + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(registry_c, now, base64decode=True) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': registry_path, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs) + + ###################################################################### + # /Check router/registry certs + ###################################################################### + + res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs) + + msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format( + count=res['total'], + exp=res['expired'], + warn=res['warning'], + ok=res['ok'], + window=int(module.params['warning_days']), + ) + + # By default we only return detailed information about expired or + # warning certificates. If show_all is true then we will print all + # the certificates examined. + if not module.params['show_all']: + check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']] + check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']] + check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']] + check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']] + check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']] + else: + check_results['ocp_certs'] = ocp_certs + check_results['kubeconfigs'] = kubeconfigs + check_results['etcd'] = etcd_certs + check_results['registry'] = registry_certs + check_results['router'] = router_certs + + # Sort the final results to report in order of ascending safety + # time. 
That is to say, the certificates which will expire sooner + # will be at the front of the list and certificates which will + # expire later are at the end. Router and registry certs should be + # limited to just 1 result, so don't bother sorting those. + check_results['ocp_certs'] = sorted(check_results['ocp_certs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining'])) + check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining'])) + check_results['etcd'] = sorted(check_results['etcd'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining'])) + + # This module will never change anything, but we might want to + # change the return code parameter if there is some catastrophic + # error we noticed earlier + module.exit_json( + check_results=check_results, + summary=res, + msg=msg, + rc=0, + changed=False + ) + +###################################################################### +# It's just the way we do things in Ansible. So disable this warning +# +# pylint: disable=wrong-import-position,import-error +from ansible.module_utils.basic import AnsibleModule +if __name__ == '__main__': + main() diff --git a/roles/openshift_certificate_expiry/meta/main.yml b/roles/openshift_certificate_expiry/meta/main.yml new file mode 100644 index 000000000..c13b29ba5 --- /dev/null +++ b/roles/openshift_certificate_expiry/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Tim Bielawa + description: OpenShift Certificate Expiry Checker + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.1 + version: 1.0 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: [] diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml new file mode 100644 index 000000000..139d5de6e --- /dev/null +++ b/roles/openshift_certificate_expiry/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: Check cert expirys on host + openshift_cert_expiry: + warning_days: "{{ openshift_certificate_expiry_warning_days|int }}" + config_base: "{{ openshift_certificate_expiry_config_base }}" + show_all: "{{ openshift_certificate_expiry_show_all|bool }}" + register: check_results + +- name: Generate expiration report HTML + become: no + run_once: yes + template: + src: cert-expiry-table.html.j2 + dest: "{{ openshift_certificate_expiry_html_report_path }}" + delegate_to: localhost + when: "{{ openshift_certificate_expiry_generate_html_report|bool }}" + +- name: Generate the result JSON string + run_once: yes + set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" + when: "{{ openshift_certificate_expiry_save_json_results|bool }}" + +- name: Generate results JSON file + become: no + run_once: yes + template: + src: save_json_results.j2 + dest: "{{ openshift_certificate_expiry_json_results_path }}" + delegate_to: localhost + when: "{{ openshift_certificate_expiry_save_json_results|bool }}" diff --git a/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 new file mode 100644 index 000000000..b05110336 --- /dev/null +++ b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 @@ -0,0 +1,124 @@ +<!DOCTYPE html> +<html> + <head> + <meta charset="UTF-8" /> + <title>OCP Certificate Expiry Report</title> + {# For fancy icons and a pleasing font #} + <link rel="stylesheet" 
href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" /> + <link href="https://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,700" rel="stylesheet" /> + <style type="text/css"> + body { + font-family: 'Source Sans Pro', sans-serif; + margin-left: 50px; + margin-right: 50px; + margin-bottom: 20px; + padding-top: 70px; + } + table { + border-collapse: collapse; + margin-bottom: 20px; + } + table, th, td { + border: 1px solid black; + } + th, td { + padding: 5px; + } + .cert-kind { + margin-top: 5px; + margin-bottom: 5px; + } + footer { + font-size: small; + text-align: center; + } + tr.odd { + background-color: #f2f2f2; + } + </style> + </head> + <body> + <nav class="navbar navbar-default navbar-fixed-top"> + <div class="container-fluid"> + <div class="navbar-header"> + <a class="navbar-brand" href="#">OCP Certificate Expiry Report</a> + </div> + <div class="collapse navbar-collapse"> + <p class="navbar-text navbar-right"> + <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html" + target="_blank" + class="navbar-link"> + <i class="glyphicon glyphicon-book"></i> Redeploying Certificates + </a> + </p> + </div> + </div> + </nav> + + {# Each host has a header and table to itself #} + {% for host in play_hosts %} + <h1>{{ host }}</h1> + + <p> + {{ hostvars[host].check_results.msg }} + </p> + <ul> + <li><b>Expirations checked at:</b> {{ hostvars[host].check_results.check_results.meta.checked_at_time }}</li> + <li><b>Warn after date:</b> {{ hostvars[host].check_results.check_results.meta.warn_before_date }}</li> + </ul> + + <table border="1" width="100%"> + {# These are hard-coded right now, but should be grabbed dynamically from the registered results #} + {%- for kind in ['ocp_certs', 'etcd', 'kubeconfigs', 'router', 'registry'] -%} + <tr> + <th colspan="6" style="text-align:center"><h2 class="cert-kind">{{ kind }}</h2></th> + </tr> + + <tr> + <th> </th> + <th style="width:33%">Certificate Common/Alt Name(s)</th> + <th>Health</th> + <th>Days Remaining</th> + <th>Expiration Date</th> + <th>Path</th> + </tr> + + {# A row for each certificate examined #} + {%- for v in hostvars[host].check_results.check_results[kind] -%} + + {# Let's add some flair and show status visually with fancy icons #} + {% if v.health == 'ok' %} + {% set health_icon = 'glyphicon glyphicon-ok' %} + {% elif v.health == 'warning' %} + {% set health_icon = 'glyphicon glyphicon-alert' %} + {% else %} + {% set health_icon = 'glyphicon glyphicon-remove' %} + {% endif %} + + <tr class="{{ loop.cycle('odd', 'even') }}"> + <td style="text-align:center"><i class="{{ health_icon }}"></i></td> + <td style="width:33%">{{ v.cert_cn }}</td> + <td>{{ v.health }}</td> + <td>{{ v.days_remaining }}</td> + <td>{{ v.expiry }}</td> + <td>{{ v.path }}</td> + </tr> + {% endfor %} + {# end row generation per cert of this type #} + {% endfor %} + {# end generation for each kind of cert block #} + </table> + <hr /> + {% endfor %} + {# end section generation for each host #} + + <footer> + <p> + Expiration report generated by <a href="https://github.com/openshift/openshift-ansible" target="_blank">openshift-ansible</a> + </p> + <p> + Status icons from bootstrap/glyphicon + </p> + </footer> + </body> +</html> diff --git a/roles/openshift_certificate_expiry/templates/save_json_results.j2 b/roles/openshift_certificate_expiry/templates/save_json_results.j2 new file mode 100644 index 000000000..c1173d9ea --- /dev/null +++ 
b/roles/openshift_certificate_expiry/templates/save_json_results.j2 @@ -0,0 +1 @@ +{{ json_result_string | to_nice_json(indent=2)}} diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py index fd290c6fc..9ff738d14 100644 --- a/roles/openshift_cli/library/openshift_container_binary_sync.py +++ b/roles/openshift_cli/library/openshift_container_binary_sync.py @@ -83,8 +83,13 @@ class BinarySyncer(object): def _sync_symlink(self, binary_name, link_to): """ Ensure the given binary name exists and links to the expected binary. """ + + # The symlink we are creating: link_path = os.path.join(self.bin_dir, binary_name) - link_dest = os.path.join(self.bin_dir, binary_name) + + # The expected file we should be linking to: + link_dest = os.path.join(self.bin_dir, link_to) + if not os.path.exists(link_path) or \ not os.path.islink(link_path) or \ os.path.realpath(link_path) != os.path.realpath(link_dest): diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index 82536e8af..551e21e72 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -106,22 +106,6 @@ failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0" changed_when: false -- name: Import origin infrastructure-templates - command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_origin_base }} - when: openshift_examples_load_centos | bool - register: oex_import_infrastructure - failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0" - changed_when: false - -- name: Import enterprise infrastructure-templates - command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_enterprise_base }} - when: openshift_examples_load_rhel | bool - register: oex_import_infrastructure - failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0" - changed_when: false - - name: Remove old xPaas template files file: path: "{{ item }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index cb642e12e..61ce55b7f 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -59,7 +59,7 @@ def migrate_docker_facts(facts): facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net') # log_options was originally meant to be a comma separated string, but - # we now prefer an actual list, with backward compatability: + # we now prefer an actual list, with backward compatibility: if 'log_options' in facts['docker'] and \ isinstance(facts['docker']['log_options'], basestring): facts['docker']['log_options'] = facts['docker']['log_options'].split(",") @@ -1035,12 +1035,23 @@ def get_current_config(facts): return current_config def build_kubelet_args(facts): - """ Build node kubelet_args """ - cloud_cfg_path = os.path.join(facts['common']['config_base'], - 'cloudprovider') + """Build node kubelet_args + +In the node-config.yaml file, kubeletArgument sub-keys have their +values provided as a list. Hence the gratuitous use of ['foo'] below. 
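To make the list-valued shape concrete, here is a small sketch of the kind of `kubelet_args` dict this function assembles; the label values are invented samples, and the conversion mirrors the `node-labels` handling shown below:

```python
# Sketch only: kubeletArguments values are always lists, even single flags.
# The labels dict stands in for openshift_node_labels from the inventory.
labels = {'region': 'infra', 'zone': 'default'}
kubelet_args = {
    'cloud-provider': ['aws'],                                 # one flag, still a list
    'node-labels': ['='.join(kv) for kv in labels.items()],   # e.g. ['region=infra', 'zone=default']
}
print(kubelet_args)
```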
+ """ + cloud_cfg_path = os.path.join( + facts['common']['config_base'], + 'cloudprovider') + + # We only have to do this stuff on hosts that are nodes if 'node' in facts: + # Any changes to the kubeletArguments parameter are stored + # here first. kubelet_args = {} + if 'cloudprovider' in facts: + # EVERY cloud is special <3 if 'kind' in facts['cloudprovider']: if facts['cloudprovider']['kind'] == 'aws': kubelet_args['cloud-provider'] = ['aws'] @@ -1050,6 +1061,28 @@ def build_kubelet_args(facts): kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if facts['cloudprovider']['kind'] == 'gce': kubelet_args['cloud-provider'] = ['gce'] + + # Automatically add node-labels to the kubeletArguments + # parameter. See BZ1359848 for additional details. + # + # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848 + if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict): + # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns + # into ['foo=bar', 'a=b'] + # + # On the openshift_node_labels inventory variable we loop + # over each key-value tuple (from .items()) and join the + # key to the value with an '=' character, this produces a + # list. + # + # map() seems to be returning an itertools.imap object + # instead of a list. We cast it to a list ourselves. + labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items())) + if labels_str != '': + kubelet_args['node-labels'] = labels_str + + # If we've added items to the kubelet_args dict then we need + # to merge the new items back into the main facts object. if kubelet_args != {}: facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], []) return facts diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 4e525a2da..c29df1873 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -30,7 +30,7 @@ - name: Create OpenShift registry command: > - {{ openshift.common.admin_binary }} registry --create + {{ openshift.common.client_binary }} adm registry --create --config={{ openshift_hosted_kubeconfig }} {% if replicas > 1 -%} --replicas={{ replicas }} diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml index 664edef41..d2f6ba5f6 100644 --- a/roles/openshift_hosted/tasks/registry/secure.yml +++ b/roles/openshift_hosted/tasks/registry/secure.yml @@ -33,7 +33,7 @@ - name: Create registry certificates if they do not exist command: > - {{ openshift.common.admin_binary }} ca create-server-cert + {{ openshift.common.client_binary }} adm ca create-server-cert --signer-cert=/etc/origin/master/ca.crt --signer-key=/etc/origin/master/ca.key --signer-serial=/etc/origin/master/ca.serial.txt diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 0cad19c34..b944fa522 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -48,7 +48,7 @@ - name: Create OpenShift router command: > - {{ openshift.common.admin_binary }} router --create + {{ openshift.common.client_binary }} adm router --create --config={{ openshift_hosted_kubeconfig }} {% if replicas > 1 -%} --replicas={{ replicas }} @@ -73,7 +73,7 @@ {% if openshift.hosted.router.name | default(none) is not none -%} {{ openshift.hosted.router.name }} {% endif -%} - + register: openshift_hosted_router_results changed_when: "'service exists' 
not in openshift_hosted_router_results.stdout" failed_when: "openshift_hosted_router_results.rc != 0 and 'service exists' not in openshift_hosted_router_results.stdout and 'deployment_config' not in openshift_hosted_router_results.stderr and 'service' not in openshift_hosted_router_results.stderr" diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2 index 75d8f7fa6..557fd03af 100644 --- a/roles/openshift_hosted/templates/registry_config.j2 +++ b/roles/openshift_hosted/templates/registry_config.j2 @@ -8,55 +8,55 @@ storage: enabled: true cache: blobdescriptor: inmemory -{% if openshift.hosted.registry.storage.provider == 's3' %} +{% if openshift_hosted_registry_storage_provider | default('') == 's3' %} s3: - accesskey: {{ openshift.hosted.registry.storage.s3.accesskey }} - secretkey: {{ openshift.hosted.registry.storage.s3.secretkey }} - region: {{ openshift.hosted.registry.storage.s3.region }} -{% if 'regionendpoint' in openshift.hosted.registry.storage.s3 %} - regionendpoint: {{ openshift.hosted.registry.storage.s3.regionendpoint }} + accesskey: {{ openshift_hosted_registry_storage_s3_accesskey }} + secretkey: {{ openshift_hosted_registry_storage_s3_secretkey }} + region: {{ openshift_hosted_registry_storage_s3_region }} +{% if openshift_hosted_registry_storage_s3_regionendpoint is defined %} + regionendpoint: {{ openshift_hosted_registry_storage_s3_regionendpoint }} {% endif %} - bucket: {{ openshift.hosted.registry.storage.s3.bucket }} + bucket: {{ openshift_hosted_registry_storage_s3_bucket }} encrypt: false secure: true v4auth: true - rootdirectory: {{ openshift.hosted.registry.storage.s3.rootdirectory | default('/registry') }} - chunksize: "{{ openshift.hosted.registry.storage.s3.chunksize | default(26214400) }}" -{% elif openshift.hosted.registry.storage.provider == 'azure_blob' %} + rootdirectory: {{ openshift_hosted_registry_storage_s3_rootdirectory | default('/registry') }} + chunksize: "{{ openshift_hosted_registry_storage_s3_chunksize | default(26214400) }}" +{% elif openshift_hosted_registry_storage_provider | default('') == 'azure_blob' %} azure: - accountname: {{ openshift.hosted.registry.storage.azure_blob.accountname }} - accountkey: {{ openshift.hosted.registry.storage.azure_blob.accountkey }} - container: {{ openshift.hosted.registry.storage.azure_blob.container }} - realm: {{ openshift.hosted.registry.storage.azure_blob.realm }} -{% elif openshift.hosted.registry.storage.provider == 'swift' %} + accountname: {{ openshift_hosted_registry_storage_azure_blob_accountname }} + accountkey: {{ openshift_hosted_registry_storage_azure_blob_accountkey }} + container: {{ openshift_hosted_registry_storage_azure_blob_container }} + realm: {{ openshift_hosted_registry_storage_azure_blob_realm }} +{% elif openshift_hosted_registry_storage_provider | default('') == 'swift' %} swift: - authurl: {{ openshift.hosted.registry.storage.swift.authurl }} - username: {{ openshift.hosted.registry.storage.swift.username }} - password: {{ openshift.hosted.registry.storage.swift.password }} - container: {{ openshift.hosted.registry.storage.swift.container }} -{% if 'region' in openshift.hosted.registry.storage.swift %} - region: {{ openshift.hosted.registry.storage.swift.region }} + authurl: {{ openshift_hosted_registry_storage_swift_authurl }} + username: {{ openshift_hosted_registry_storage_swift_username }} + password: {{ openshift_hosted_registry_storage_swift_password }} + container: {{ 
openshift_hosted_registry_storage_swift_container }} +{% if openshift_hosted_registry_storage_swift_region is defined %} + region: {{ openshift_hosted_registry_storage_swift_region }} {% endif -%} -{% if 'tenant' in openshift.hosted.registry.storage.swift %} - tenant: {{ openshift.hosted.registry.storage.swift.tenant }} +{% if openshift_hosted_registry_storage_swift_tenant is defined %} + tenant: {{ openshift_hosted_registry_storage_swift_tenant }} {% endif -%} -{% if 'tenantid' in openshift.hosted.registry.storage.swift %} - tenantid: {{ openshift.hosted.registry.storage.swift.tenantid }} +{% if openshift_hosted_registry_storage_swift_tenantid is defined %} + tenantid: {{ openshift_hosted_registry_storage_swift_tenantid }} {% endif -%} -{% if 'domain' in openshift.hosted.registry.storage.swift %} - domain: {{ openshift.hosted.registry.storage.swift.domain }} +{% if openshift_hosted_registry_storage_swift_domain is defined %} + domain: {{ openshift_hosted_registry_storage_swift_domain }} {% endif -%} -{% if 'domainid' in openshift.hosted.registry.storage.swift %} - domainid: {{ openshift.hosted.registry.storage.swift.domainid }} +{% if openshift_hosted_registry_storage_swift_domainid %} + domainid: {{ openshift_hosted_registry_storage_swift_domainid }} {% endif -%} -{% elif openshift.hosted.registry.storage.provider == 'gcs' %} +{% elif openshift_hosted_registry_storage_provider | default('') == 'gcs' %} gcs: - bucket: {{ openshift.hosted.registry.storage.gcs.bucket }} -{% if 'keyfile' in openshift.hosted.registry.storage.gcs %} - keyfile: {{ openshift.hosted.registry.storage.gcs.keyfile }} + bucket: {{ openshift_hosted_registry_storage_gcs_bucket }} +{% if openshift_hosted_registry_storage_gcs_keyfile is defined %} + keyfile: {{ openshift_hosted_registry_storage_gcs_keyfile }} {% endif -%} -{% if 'rootdirectory' in openshift.hosted.registry.storage.gcs %} - rootdirectory: {{ openshift.hosted.registry.storage.gcs.rootdirectory }} +{% if openshift_hosted_registry_storage_gcs_rootdirectory is defined %} + rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory }} {% endif -%} {% endif -%} auth: @@ -70,16 +70,16 @@ middleware: repository: - name: openshift options: - pullthrough: {{ openshift.hosted.registry.pullthrough | default(true) }} - acceptschema2: {{ openshift.hosted.registry.acceptschema2 | default(false) }} - enforcequota: {{ openshift.hosted.registry.enforcequota | default(false) }} -{% if openshift.hosted.registry.storage.provider == 's3' and 'cloudfront' in openshift.hosted.registry.storage.s3 %} + pullthrough: {{ openshift_hosted_registry_pullthrough | default(true) }} + acceptschema2: {{ openshift_hosted_registry_acceptschema2 | default(false) }} + enforcequota: {{ openshift_hosted_registry_enforcequota | default(false) }} +{% if openshift_hosted_registry_storage_provider | default('') == 's3' and openshift_hosted_registry_storage_s3_cloudfront_baseurl is defined %} storage: - name: cloudfront options: - baseurl: {{ openshift.hosted.registry.storage.s3.cloudfront.baseurl }} - privatekey: {{ openshift.hosted.registry.storage.s3.cloudfront.privatekeyfile }} - keypairid: {{ openshift.hosted.registry.storage.s3.cloudfront.keypairid }} + baseurl: {{ openshift_hosted_registry_storage_s3_cloudfront_baseurl }} + privatekey: {{ openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile }} + keypairid: {{ openshift_hosted_registry_storage_s3_cloudfront_keypairid }} {% elif openshift.common.version_gte_3_3_or_1_3 | bool %} storage: - name: openshift diff --git 
a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml index e357899e5..a01f24df8 100644 --- a/roles/openshift_hosted_logging/defaults/main.yml +++ b/roles/openshift_hosted_logging/defaults/main.yml @@ -1,2 +1,2 @@ --- -examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples" +hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml index 8331f0389..8754616d9 100644 --- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml +++ b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml @@ -46,8 +46,8 @@ - name: "Remove deployer template" command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift" - register: delete_ouput - failed_when: delete_ouput.rc == 1 and 'exists' not in delete_ouput.stderr + register: delete_output + failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr - name: Delete temp directory diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml index 65af1c08e..0162d1fb0 100644 --- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml +++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml @@ -17,7 +17,7 @@ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig changed_when: False - - name: Check for logging project already exists + - name: "Check for logging project already exists" command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}' register: logging_project_result @@ -25,7 +25,7 @@ - name: "Create logging project" command: > - {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging when: logging_project_result.stdout == "" - name: "Changing projects" @@ -40,9 +40,13 @@ - name: "Create templates for logging accounts and the deployer" command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ examples_base }}/infrastructure-templates/{{ 'enterprise' if openshift_deployment_type == 'openshift-enterprise' else 'origin' }}/logging-deployer.yaml - register: template_output - failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr" + {{ openshift.common.client_binary }} create + -f {{ hosted_base }}/logging-deployer.yaml + --config={{ mktemp.stdout }}/admin.kubeconfig + -n logging + register: logging_import_template + failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0" + changed_when: "'created' in logging_import_template.stdout" - name: "Process the logging accounts template" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -" @@ -51,19 +55,19 @@ - name: "Set permissions for logging-deployer service account" command: > - {{ openshift.common.admin_binary }} --config={{ 
mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer register: permiss_output failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr" - name: "Set permissions for fluentd" command: > - {{ openshift.common.admin_binary}} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd + {{ openshift.common.client_binary }} adm policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd_output failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" - name: "Set additional permissions for fluentd" command: > - {{ openshift.common.admin_binary}} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd + {{ openshift.common.client_binary }} adm policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd2_output failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" diff --git a/roles/openshift_hosted_templates/defaults/main.yml b/roles/openshift_hosted_templates/defaults/main.yml new file mode 100644 index 000000000..f4fd15089 --- /dev/null +++ b/roles/openshift_hosted_templates/defaults/main.yml @@ -0,0 +1,10 @@ +--- +hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" +hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'enterprise' }}" + +content_version: "{{ openshift.common.examples_content_version }}" + +registry_url: "" +registry_host: "{{ registry_url.split('/')[0] if '.' 
in registry_url.split('/')[0] else '' }}" + +openshift_hosted_templates_import_command: 'create' diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.0/enterprise/logging-deployer.yaml index b3b60bf9b..b3b60bf9b 100644 --- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.0/enterprise/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.0/enterprise/metrics-deployer.yaml index ddd9f2f75..ddd9f2f75 100644 --- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.0/enterprise/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.0/origin/logging-deployer.yaml index 4c798e148..4c798e148 100644 --- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.0/origin/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.0/origin/metrics-deployer.yaml index 3e9bcde5b..3e9bcde5b 100644 --- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.0/origin/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.1/enterprise/logging-deployer.yaml index 9c8f1071a..9c8f1071a 100644 --- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.1/enterprise/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.1/enterprise/metrics-deployer.yaml index 99f2df4fa..99f2df4fa 100644 --- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.1/enterprise/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.1/origin/logging-deployer.yaml index 9257b1f28..9257b1f28 100644 --- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.1/origin/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.1/origin/metrics-deployer.yaml index 30d79acee..30d79acee 100644 --- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.1/origin/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml 
b/roles/openshift_hosted_templates/files/v1.2/enterprise/logging-deployer.yaml index b6975eead..b6975eead 100644 --- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.2/enterprise/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.2/enterprise/metrics-deployer.yaml index 032f94a18..032f94a18 100644 --- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.2/enterprise/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.2/origin/logging-deployer.yaml index 8b28f872f..8b28f872f 100644 --- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.2/origin/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.2/origin/metrics-deployer.yaml index ab62ae76f..ab62ae76f 100644 --- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.2/origin/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml index a8d4b1cbb..13cef2d66 100644 --- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml @@ -200,13 +200,13 @@ items: name: MODE value: "install" - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' + description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.1", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX value: "registry.access.redhat.com/openshift3/" - - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' + description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.1", set version "3.3.1"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.3.1" - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." name: IMAGE_PULL_SECRET diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/metrics-deployer.yaml index afd47ec7c..5e21e3a7a 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/metrics-deployer.yaml @@ -101,7 +101,7 @@ parameters: - description: 'Specify version for metrics components; e.g. 
for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.3.1" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml index 11478263c..11478263c 100644 --- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/logging-deployer.yaml index 8b28f872f..8b28f872f 100644 --- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/origin/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/metrics-deployer.yaml index 5f2290419..5f2290419 100644 --- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/origin/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml index 80cc4233b..80cc4233b 100644 --- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml index a8d4b1cbb..9cff9daca 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml @@ -200,13 +200,13 @@ items: name: MODE value: "install" - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' + description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX value: "registry.access.redhat.com/openshift3/" - - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' + description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.4.0" - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." 
name: IMAGE_PULL_SECRET diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml index afd47ec7c..1b46d6ac7 100644 --- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml @@ -101,7 +101,7 @@ parameters: - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.4.0" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml index 11478263c..11478263c 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/logging-deployer.yaml index 8b28f872f..8b28f872f 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/metrics-deployer.yaml index 5f2290419..5f2290419 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml index 80cc4233b..80cc4233b 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml new file mode 100644 index 000000000..9c12865bf --- /dev/null +++ b/roles/openshift_hosted_templates/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Andrew Butcher + description: OpenShift Hosted Templates + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 2.1 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- role: openshift_common diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml new file mode 100644 index 000000000..7d176bce3 --- /dev/null +++ b/roles/openshift_hosted_templates/tasks/main.yml @@ -0,0 +1,65 @@ +--- +- name: Create local temp dir for OpenShift hosted templates copy + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + become: False + register: copy_hosted_templates_mktemp + run_once: True + +- name: Create tar of OpenShift examples + local_action: command tar -C "{{ role_path }}/files/{{ content_version }}/{{ hosted_deployment_type }}" -cvf "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" . + args: + # Disables the following warning: + # Consider using unarchive module rather than running tar + warn: no + become: False + register: copy_hosted_templates_tar + +- name: Create remote OpenShift hosted templates directory + file: + dest: "{{ hosted_base }}" + state: directory + mode: 0755 + +- name: Unarchive the OpenShift hosted templates on the remote + unarchive: + src: "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" + dest: "{{ hosted_base }}/" + +- name: Cleanup the OpenShift hosted templates temp dir + become: False + local_action: file dest="{{ copy_hosted_templates_mktemp.stdout }}" state=absent + +- name: Modify registry paths if registry_url is not registry.access.redhat.com + shell: > + find {{ hosted_base }} -type f | xargs -n 1 sed -i 's|registry.access.redhat.com|{{ registry_host | quote }}|g' + when: registry_host != '' and openshift_hosted_modify_imagestreams | default(openshift_examples_modify_imagestreams | default(False)) | bool + +- name: Create temp directory for kubeconfig + command: mktemp -d /tmp/openshift-ansible-XXXXXX + register: mktemp + changed_when: False + +- name: Record kubeconfig tmp dir + set_fact: + openshift_hosted_templates_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" + +- name: Copy the admin client config(s) + command: > + cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ openshift_hosted_templates_kubeconfig }} + changed_when: False + +- name: Create or update hosted templates + command: > + {{ openshift.common.client_binary }} {{ openshift_hosted_templates_import_command }} + -f {{ hosted_base }} + --config={{ openshift_hosted_templates_kubeconfig }} + -n openshift + register: oht_import_templates + failed_when: "'already exists' not in oht_import_templates.stderr and oht_import_templates.rc != 0" + changed_when: "'created' in oht_import_templates.stdout" + +- name: Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + changed_when: False diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index d1cc5b274..28e4e46e9 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -26,7 +26,7 @@ - name: Set node schedulability command: > - {{ openshift.common.admin_binary }} manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }} + {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }} --config={{ 
openshift_manage_node_kubeconfig }} -n default with_items: "{{ openshift_nodes }}" diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index 5d7a3c038..bdaf64b3f 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -8,9 +8,9 @@ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}} changed_when: false -- name: Add Managment Infrastructure project +- name: Add Management Infrastructure project command: > - {{ openshift.common.admin_binary }} new-project + {{ openshift.common.client_binary }} adm new-project management-infra --description="Management Infrastructure" --config={{manage_iq_tmp_conf}} @@ -52,7 +52,7 @@ - name: Configure role/user permissions command: > - {{ openshift.common.admin_binary }} {{item}} + {{ openshift.common.client_binary }} adm {{item}} --config={{manage_iq_tmp_conf}} with_items: "{{manage_iq_tasks}}" register: osmiq_perm_task @@ -61,7 +61,7 @@ - name: Configure 3_2 role/user permissions command: > - {{ openshift.common.admin_binary }} {{item}} + {{ openshift.common.client_binary }} adm {{item}} --config={{manage_iq_tmp_conf}} with_items: "{{manage_iq_openshift_3_2_tasks}}" register: osmiq_perm_3_2_task diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 1a59717c7..1d6758c4a 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -57,7 +57,7 @@ - name: Create the policy file if it does not already exist command: > - {{ openshift.common.admin_binary }} create-bootstrap-policy-file + {{ openshift.common.client_binary }} adm create-bootstrap-policy-file --filename={{ openshift_master_policy }} args: creates: "{{ openshift_master_policy }}" diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index ffde59358..e9b7de330 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -52,7 +52,7 @@ - name: Create the master certificates if they do not already exist command: > - {{ openshift.common.admin_binary }} create-master-certs + {{ openshift.common.client_binary }} adm create-master-certs {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} diff --git a/roles/openshift_metrics/defaults/main.yml b/roles/openshift_metrics/defaults/main.yml new file mode 100644 index 000000000..a01f24df8 --- /dev/null +++ b/roles/openshift_metrics/defaults/main.yml @@ -0,0 +1,2 @@ +--- +hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_metrics/tasks/install.yml index 9c4eb22d7..4976c7153 100644 --- a/roles/openshift_metrics/tasks/install.yml +++ b/roles/openshift_metrics/tasks/install.yml @@ -30,7 +30,7 @@ - name: Add edit permission to the openshift-infra project to metrics-deployer SA command: > - {{ openshift.common.admin_binary }} + {{ openshift.common.client_binary }} adm --config={{ openshift_metrics_kubeconfig }} --namespace openshift-infra policy add-role-to-user edit @@ -48,7 +48,7 @@ - name: Add cluster-reader permission to the openshift-infra project to heapster SA command: > - {{ openshift.common.admin_binary }} + {{ openshift.common.client_binary }} adm --config={{ 
openshift_metrics_kubeconfig }} --namespace openshift-infra policy add-cluster-role-to-user cluster-reader @@ -70,7 +70,7 @@ - name: Build metrics deployer command set_fact: deployer_cmd: "{{ openshift.common.client_binary }} process -f \ - {{ metrics_template_dir }}/metrics-deployer.yaml -v \ + {{ hosted_base }}/metrics-deployer.yaml -v \ HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }},USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }},DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}{{ image_prefix }}{{ image_version }},MODE={{ deployment_mode }} \ | {{ openshift.common.client_binary }} --namespace openshift-infra \ --config={{ openshift_metrics_kubeconfig }} \ diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 64c90db50..8b3145785 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -9,6 +9,10 @@ role: "{{ item.role }}" local_facts: "{{ item.local_facts }}" with_items: + # Reset node labels to an empty dictionary. + - role: node + local_facts: + labels: {} - role: node local_facts: annotations: "{{ openshift_node_annotations | default(none) }}" @@ -76,7 +80,7 @@ # TODO: add the validate parameter when there is a validation command to run - name: Create the Node config template: - dest: "{{ openshift_node_config_file }}" + dest: "{{ openshift.common.config_base }}/node/node-config.yaml" src: node.yaml.v1.j2 backup: true owner: root diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 40d1dd50b..27c9b48f0 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -42,7 +42,7 @@ - regex: '^OPTIONS=' line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}" - regex: '^CONFIG_FILE=' - line: "CONFIG_FILE={{ openshift_node_config_file }}" + line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml" - regex: '^IMAGE_VERSION=' line: "IMAGE_VERSION={{ openshift_image_tag }}" notify: diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml deleted file mode 100644 index 77a9694de..000000000 --- a/roles/openshift_node/vars/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_node_config_dir: "{{ openshift.common.config_base }}/node" -openshift_node_config_file: "{{ openshift_node_config_dir }}/node-config.yaml" diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index 80ab4bb1d..69bcd3668 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -44,7 +44,7 @@ - name: Generate the node client config command: > - {{ openshift.common.admin_binary }} create-api-client-config + {{ openshift.common.client_binary }} adm create-api-client-config {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} @@ -63,7 +63,7 @@ - name: Generate the node server certificate command: > - {{ openshift.common.admin_binary }} ca create-server-cert + {{ openshift.common.client_binary }} adm ca create-server-cert --cert={{ openshift_node_generated_config_dir }}/server.crt --key={{ openshift_generated_configs_dir }}/node-{{ openshift.common.hostname 
}}/server.key --overwrite=true diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml index bd9a0ffb6..396c27295 100644 --- a/roles/openshift_node_dnsmasq/tasks/main.yml +++ b/roles/openshift_node_dnsmasq/tasks/main.yml @@ -29,6 +29,12 @@ when: openshift_node_dnsmasq_additional_config_file is defined notify: restart dnsmasq +- name: Enable dnsmasq + service: + name: dnsmasq + enabled: yes + state: started + # Dynamic NetworkManager based dispatcher - include: ./network-manager.yml when: network_manager_active | bool diff --git a/roles/openshift_projects/tasks/main.yml b/roles/openshift_projects/tasks/main.yml index 62a357cf7..30d58afd3 100644 --- a/roles/openshift_projects/tasks/main.yml +++ b/roles/openshift_projects/tasks/main.yml @@ -20,7 +20,7 @@ - name: Create projects command: > - {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{ item.item.key }} {% if item.item.value.default_node_selector | default(none) != none %} {{ '--node-selector=' ~ item.item.value.default_node_selector }} diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml index e90384d37..1ff9e6dcb 100644 --- a/roles/openshift_serviceaccounts/tasks/main.yml +++ b/roles/openshift_serviceaccounts/tasks/main.yml @@ -26,7 +26,7 @@ - name: Grant the user access to the appropriate scc command: > - {{ openshift.common.admin_binary }} policy add-scc-to-user + {{ openshift.common.client_binary }} adm policy add-scc-to-user {{ item.1.item }} system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }} when: "openshift.common.version_gte_3_1_or_1_1 and item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users | default([]) }}" with_nested: diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index 190016c14..bd638b69b 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -50,8 +50,8 @@ class IpTablesCreateJumpRuleError(IpTablesError): self.chain = chain -# TODO: impliment rollbacks for any events that where successful and an -# exception was thrown later. for example, when the chain is created +# TODO: implement rollbacks for any events that were successful and an +# exception was thrown later. For example, when the chain is created # successfully, but the add/remove rule fails. class IpTablesManager(object): # pylint: disable=too-many-instance-attributes def __init__(self, module): diff --git a/utils/Makefile b/utils/Makefile index 59aff92fd..62f08f74b 100644 --- a/utils/Makefile +++ b/utils/Makefile @@ -31,6 +31,8 @@ ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $< MANPAGES := docs/man/man1/atomic-openshift-installer.1 VERSION := 1.3 +PEPEXCLUDES := E501,E121,E124 + sdist: clean python setup.py sdist rm -fR $(SHORTNAME).egg-info @@ -80,7 +82,7 @@ ci-pylint: @echo "#############################################" @echo "# Running PyLint Tests in virtualenv" @echo "#############################################" - . 
$(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py ../callback_plugins/openshift_quick_installer.py + . $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py ../callback_plugins/openshift_quick_installer.py ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py ci-list-deps: @echo "#############################################" @@ -94,13 +96,17 @@ ci-pyflakes: @echo "#################################################" . $(NAME)env/bin/activate && pyflakes src/ooinstall/*.py . $(NAME)env/bin/activate && pyflakes ../callback_plugins/openshift_quick_installer.py + . $(NAME)env/bin/activate && pyflakes ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py ci-pep8: @echo "#############################################" @echo "# Running PEP8 Compliance Tests in virtualenv" @echo "#############################################" - . $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/ - . $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 ../callback_plugins/openshift_quick_installer.py + . $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES) src/$(SHORTNAME)/ + . $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES) ../callback_plugins/openshift_quick_installer.py +# This one excludes E402 because it is an ansible module and the +# boilerplate import statement is expected to be at the bottom + . $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES),E402 ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py ci: clean virtualenv ci-list-deps ci-pep8 ci-pylint ci-pyflakes ci-unittests : diff --git a/utils/docs/man/man1/atomic-openshift-installer.1 b/utils/docs/man/man1/atomic-openshift-installer.1 index 4da82191b..072833ce8 100644 --- a/utils/docs/man/man1/atomic-openshift-installer.1 +++ b/utils/docs/man/man1/atomic-openshift-installer.1 @@ -2,12 +2,12 @@ .\" Title: atomic-openshift-installer .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/> -.\" Date: 09/28/2016 +.\" Date: 10/20/2016 .\" Manual: atomic-openshift-installer .\" Source: atomic-openshift-utils 1.3 .\" Language: English .\" -.TH "ATOMIC\-OPENSHIFT\-I" "1" "09/28/2016" "atomic\-openshift\-utils 1\&.3" "atomic\-openshift\-installer" +.TH "ATOMIC\-OPENSHIFT\-I" "1" "10/20/2016" "atomic\-openshift\-utils 1\&.3" "atomic\-openshift\-installer" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -121,6 +121,17 @@ Show the usage help and exit\&. \fBupgrade\fR .RE .sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +\fBscaleup\fR +.RE +.sp The options specific to each command are described in the following sections\&. .SH "INSTALL" .sp @@ -158,6 +169,9 @@ Upgrade to the latest major version\&. For example, if you are running version then this could upgrade you to \fB3\&.3\fR\&. .RE +.SH "SCALEUP" +.sp +The \fBscaleup\fR command is used to add new nodes to an existing cluster\&. This command has no additional options\&. .SH "FILES" .sp \fB~/\&.config/openshift/installer\&.cfg\&.yml\fR \(em Installer configuration file\&. 
Can be used to generate an inventory later or start an unattended installation\&. diff --git a/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in index 64e5d14a3..9b02c4d14 100644 --- a/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in +++ b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in @@ -73,6 +73,7 @@ COMMANDS * **install** * **uninstall** * **upgrade** +* **scaleup** The options specific to each command are described in the following sections. @@ -122,6 +123,11 @@ Upgrade to the latest major version. For example, if you are running version **3.2** then this could upgrade you to **3.3**. +SCALEUP +------- + +The **scaleup** command is used to add new nodes to an existing cluster. +This command has no additional options. FILES ----- diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh index 3847c029a..3c5614d39 100755 --- a/utils/site_assets/oo-install-bootstrap.sh +++ b/utils/site_assets/oo-install-bootstrap.sh @@ -67,7 +67,7 @@ pip install --no-index -f file:///$(readlink -f deps) ansible 2>&1 >> $OO_INSTAL # TODO: these deps should technically be handled as part of installing ooinstall pip install --no-index -f file:///$(readlink -f deps) click 2>&1 >> $OO_INSTALL_LOG pip install --no-index ./src/ 2>&1 >> $OO_INSTALL_LOG -echo "Installation preperation done!" 2>&1 >> $OO_INSTALL_LOG +echo "Installation preparation done!" 2>&1 >> $OO_INSTALL_LOG echo "Using `ansible --version`" 2>&1 >> $OO_INSTALL_LOG diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 85f18d5d3..7c7770207 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -1,28 +1,24 @@ -# TODO: Temporarily disabled due to importing old code into openshift-ansible -# repo. We will work on these over time. 
-# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter,too-many-lines +# pylint: disable=missing-docstring,no-self-use,no-value-for-parameter,too-many-lines +import logging import os -import re import sys -import logging + import click from pkg_resources import parse_version -from ooinstall import openshift_ansible -from ooinstall.oo_config import OOConfig -from ooinstall.oo_config import OOConfigInvalidHostError -from ooinstall.oo_config import Host, Role +from ooinstall import openshift_ansible, utils +from ooinstall.oo_config import Host, OOConfig, OOConfigInvalidHostError, Role from ooinstall.variants import find_variant, get_variant_version_combos -installer_log = logging.getLogger('installer') -installer_log.setLevel(logging.CRITICAL) -installer_file_handler = logging.FileHandler('/tmp/installer.txt') -installer_file_handler.setFormatter( +INSTALLER_LOG = logging.getLogger('installer') +INSTALLER_LOG.setLevel(logging.CRITICAL) +INSTALLER_FILE_HANDLER = logging.FileHandler('/tmp/installer.txt') +INSTALLER_FILE_HANDLER.setFormatter( logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')) # Example output: # 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts' -installer_file_handler.setLevel(logging.DEBUG) -installer_log.addHandler(installer_file_handler) +INSTALLER_FILE_HANDLER.setLevel(logging.DEBUG) +INSTALLER_LOG.addHandler(INSTALLER_FILE_HANDLER) DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg' QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg' @@ -58,17 +54,8 @@ def validate_ansible_dir(path): # raise click.BadParameter("Path \"{}\" doesn't exist".format(path)) -def is_valid_hostname(hostname): - if not hostname or len(hostname) > 255: - return False - if hostname[-1] == ".": - hostname = hostname[:-1] # strip exactly one dot from the right, if present - allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) - return all(allowed.match(x) for x in hostname.split(".")) - - def validate_prompt_hostname(hostname): - if hostname == '' or is_valid_hostname(hostname): + if hostname == '' or utils.is_valid_hostname(hostname): return hostname raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.') @@ -84,7 +71,7 @@ passwordless sudo access. return click.prompt('User for ssh access', default='root') -def get_master_routingconfig_subdomain(): +def get_routingconfig_subdomain(): click.clear() message = """ You might want to override the default subdomain used for exposed routes. If you don't know what this is, use the default value. @@ -183,8 +170,9 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen if masters_set or num_masters != 2: more_hosts = click.confirm('Do you want to add additional hosts?') - if num_masters >= 3: - collect_master_lb(hosts) + master_lb = collect_master_lb(hosts) + if master_lb: + hosts.append(master_lb) roles.add('master_lb') if not existing_env: @@ -193,7 +181,8 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen return hosts, roles -def print_installation_summary(hosts, version=None): +# pylint: disable=too-many-branches +def print_installation_summary(hosts, version=None, verbose=True): """ Displays a summary of all hosts configured thus far, and what role each will play. 
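For context, the hostname check that these hunks drop from cli_installer.py is re-added verbatim as ooinstall.utils.is_valid_hostname later in this patch, and validate_prompt_hostname now simply delegates to it. The following standalone Python sketch mirrors that helper so it can be read (and run) in isolation; the sample hostnames in the __main__ block are illustrative and not part of the patch:

import re


def is_valid_hostname(hostname):
    # Reject empty names and anything longer than the 255-character limit.
    if not hostname or len(hostname) > 255:
        return False
    if hostname[-1] == ".":
        hostname = hostname[:-1]  # strip exactly one dot from the right, if present
    # Each dot-separated label must be 1-63 characters, alphanumeric or '-',
    # and may not start or end with '-'.
    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in hostname.split("."))


if __name__ == "__main__":
    # Illustrative inputs only; the installer prompt additionally accepts an
    # empty string and passes it through unchanged.
    for name in ("master.example.com", "node-1.example.com.", "-bad.example.com", ""):
        print("%-25s %s" % (name or "<empty>", is_valid_hostname(name)))
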
@@ -214,35 +203,36 @@ def print_installation_summary(hosts, version=None): click.echo('Total OpenShift masters: %s' % len(masters)) click.echo('Total OpenShift nodes: %s' % len(nodes)) - if len(masters) == 1 and version != '3.0': - ha_hint_message = """ + if verbose: + if len(masters) == 1 and version != '3.0': + ha_hint_message = """ NOTE: Add a total of 3 or more masters to perform an HA installation.""" - click.echo(ha_hint_message) - elif len(masters) == 2: - min_masters_message = """ + click.echo(ha_hint_message) + elif len(masters) == 2: + min_masters_message = """ WARNING: A minimum of 3 masters are required to perform an HA installation. Please add one more to proceed.""" - click.echo(min_masters_message) - elif len(masters) >= 3: - ha_message = """ + click.echo(min_masters_message) + elif len(masters) >= 3: + ha_message = """ NOTE: Multiple masters specified, this will be an HA deployment with a separate etcd cluster. You will be prompted to provide the FQDN of a load balancer and a host for storage once finished entering hosts. -""" - click.echo(ha_message) + """ + click.echo(ha_message) - dedicated_nodes_message = """ + dedicated_nodes_message = """ WARNING: Dedicated nodes are recommended for an HA deployment. If no dedicated nodes are specified, each configured master will be marked as a schedulable node.""" - min_ha_nodes_message = """ + min_ha_nodes_message = """ WARNING: A minimum of 3 dedicated nodes are recommended for an HA deployment.""" - if len(dedicated_nodes) == 0: - click.echo(dedicated_nodes_message) - elif len(dedicated_nodes) < 3: - click.echo(min_ha_nodes_message) + if len(dedicated_nodes) == 0: + click.echo(dedicated_nodes_message) + elif len(dedicated_nodes) < 3: + click.echo(min_ha_nodes_message) click.echo('') @@ -270,6 +260,8 @@ def print_host_summary(all_hosts, host): click.echo(" - Etcd (Embedded)") if host.is_storage(): click.echo(" - Storage") + if host.new_host: + click.echo(" - NEW") def collect_master_lb(hosts): @@ -307,14 +299,18 @@ hostname. 
'please specify a separate host' % hostname) return hostname - host_props['connect_to'] = click.prompt('Enter hostname or IP address', - value_proc=validate_prompt_lb) - install_haproxy = \ - click.confirm('Should the reference HAProxy load balancer be installed on this host?') - host_props['preconfigured'] = not install_haproxy - host_props['roles'] = ['master_lb'] - master_lb = Host(**host_props) - hosts.append(master_lb) + lb_hostname = click.prompt('Enter hostname or IP address', + value_proc=validate_prompt_lb, + default='') + if lb_hostname: + host_props['connect_to'] = lb_hostname + install_haproxy = \ + click.confirm('Should the reference HAProxy load balancer be installed on this host?') + host_props['preconfigured'] = not install_haproxy + host_props['roles'] = ['master_lb'] + return Host(**host_props) + else: + return None def collect_storage_host(hosts): @@ -395,29 +391,29 @@ Notes: default_facts_lines = [] default_facts = {} - for h in hosts: - if h.preconfigured: + for host in hosts: + if host.preconfigured: continue try: - default_facts[h.connect_to] = {} - h.ip = callback_facts[h.connect_to]["common"]["ip"] - h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"] - h.hostname = callback_facts[h.connect_to]["common"]["hostname"] - h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"] + default_facts[host.connect_to] = {} + host.ip = callback_facts[host.connect_to]["common"]["ip"] + host.public_ip = callback_facts[host.connect_to]["common"]["public_ip"] + host.hostname = callback_facts[host.connect_to]["common"]["hostname"] + host.public_hostname = callback_facts[host.connect_to]["common"]["public_hostname"] except KeyError: - click.echo("Problem fetching facts from {}".format(h.connect_to)) + click.echo("Problem fetching facts from {}".format(host.connect_to)) continue - default_facts_lines.append(",".join([h.connect_to, - h.ip, - h.public_ip, - h.hostname, - h.public_hostname])) - output = "%s\n%s" % (output, ",".join([h.connect_to, - h.ip, - h.public_ip, - h.hostname, - h.public_hostname])) + default_facts_lines.append(",".join([host.connect_to, + host.ip, + host.public_ip, + host.hostname, + host.public_hostname])) + output = "%s\n%s" % (output, ",".join([host.connect_to, + host.ip, + host.public_ip, + host.hostname, + host.public_hostname])) output = "%s\n%s" % (output, notes) click.echo(output) @@ -534,7 +530,7 @@ def error_if_missing_info(oo_cfg): oo_cfg.settings['variant_version'] = version.name # check that all listed host roles are included - listed_roles = get_host_roles_set(oo_cfg) + listed_roles = oo_cfg.get_host_roles_set() configured_roles = set([role for role in oo_cfg.deployment.roles]) if listed_roles != configured_roles: missing_info = True @@ -544,16 +540,7 @@ def error_if_missing_info(oo_cfg): sys.exit(1) -def get_host_roles_set(oo_cfg): - roles_set = set() - for host in oo_cfg.deployment.hosts: - for role in host.roles: - roles_set.add(role) - - return roles_set - - -def get_proxy_hostnames_and_excludes(): +def get_proxy_hosts_excludes(): message = """ If a proxy is needed to reach HTTP and HTTPS traffic, please enter the name below. 
This proxy will be configured by default for all processes @@ -635,7 +622,8 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h click.clear() if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables: - oo_cfg.deployment.variables['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain() + oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \ + get_routingconfig_subdomain() click.clear() # Are any proxy vars already presisted? @@ -644,7 +632,7 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h saved_proxy_vars = [pv for pv in proxy_vars if oo_cfg.deployment.variables.get(pv, 'UNSET') is not 'UNSET'] - installer_log.debug("Evaluated proxy settings, found %s presisted values", + INSTALLER_LOG.debug("Evaluated proxy settings, found %s presisted values", len(saved_proxy_vars)) current_version = parse_version( oo_cfg.settings.get('variant_version', '0.0')) @@ -654,8 +642,8 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h # recognizes proxy parameters. We must prompt the user for values # if this conditional is true. if not saved_proxy_vars and current_version >= min_version: - installer_log.debug("Prompting user to enter proxy values") - http_proxy, https_proxy, proxy_excludes = get_proxy_hostnames_and_excludes() + INSTALLER_LOG.debug("Prompting user to enter proxy values") + http_proxy, https_proxy, proxy_excludes = get_proxy_hosts_excludes() oo_cfg.deployment.variables['proxy_http'] = http_proxy oo_cfg.deployment.variables['proxy_https'] = https_proxy oo_cfg.deployment.variables['proxy_exclude_hosts'] = proxy_excludes @@ -709,82 +697,64 @@ def is_installed_host(host, callback_facts): return version_found -# pylint: disable=too-many-branches -# This pylint error will be corrected shortly in separate PR. -def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): - - # Copy the list of existing hosts so we can remove any already installed nodes. - hosts_to_run_on = list(oo_cfg.deployment.hosts) +def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): + """ + We get here once there are hosts in oo_cfg and we need to find out what + state they are in. There are several different cases that might occur: + + 1. All hosts in oo_cfg are uninstalled. In this case, we should proceed + with a normal installation. + 2. All hosts in oo_cfg are installed. In this case, ask the user if they + want to force reinstall or exit. We can also hint in this case about + the scaleup workflow. + 3. Some hosts are installed and some are uninstalled. In this case, prompt + the user if they want to force (re)install all hosts specified or direct + them to the scaleup workflow and exit. + """ + hosts_to_run_on = [] # Check if master or nodes already have something installed - installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts, callback_facts) - if len(installed_hosts) > 0: - click.echo('Installed environment detected.') - # This check has to happen before we start removing hosts later in this method + installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts, + callback_facts) + nodes = [host for host in oo_cfg.deployment.hosts if host.is_node()] + + # Case (1): All uninstalled hosts + if len(uninstalled_hosts) == len(nodes): + click.echo('All hosts in config are uninstalled. 
Proceeding with installation...') + hosts_to_run_on = list(oo_cfg.deployment.hosts) + else: + # Case (2): All installed hosts + if len(installed_hosts) == len(list(oo_cfg.deployment.hosts)): + message = """ +All specified hosts in specified environment are installed. +""" + # Case (3): Some installed, some uninstalled + else: + message = """ +A mix of installed and uninstalled hosts have been detected in your environment. +Please make sure your environment was installed successfully before adding new nodes. +""" + + click.echo(message) + + if not unattended: + response = click.confirm('Do you want to (re)install the environment?\n\n' + 'Note: This will potentially erase any custom changes.') + if response: + hosts_to_run_on = list(oo_cfg.deployment.hosts) + force = True + elif unattended and force: + hosts_to_run_on = list(oo_cfg.deployment.hosts) if not force: - if not unattended: - click.echo('By default the installer only adds new nodes ' - 'to an installed environment.') - response = click.prompt('Do you want to (1) only add additional nodes or ' - '(2) reinstall the existing hosts ' - 'potentially erasing any custom changes?', - type=int) - # TODO: this should be reworked with error handling. - # Click can certainly do this for us. - # This should be refactored as soon as we add a 3rd option. - if response == 1: - force = False - if response == 2: - force = True - - # present a message listing already installed hosts and remove hosts if needed - for host in installed_hosts: - if host.is_master(): - click.echo("{} is already an OpenShift master".format(host)) - # Masters stay in the list, we need to run against them when adding - # new nodes. - elif host.is_node(): - click.echo("{} is already an OpenShift node".format(host)) - # force is only used for reinstalls so we don't want to remove - # anything. - if not force: - hosts_to_run_on.remove(host) - - # Handle the cases where we know about uninstalled systems - # TODO: This logic is getting hard to understand. - # we should revise all this to be cleaner. - if not force and len(uninstalled_hosts) > 0: - for uninstalled_host in uninstalled_hosts: - click.echo("{} is currently uninstalled".format(uninstalled_host)) - # Fall through - click.echo('\nUninstalled hosts have been detected in your environment. ' - 'Please make sure your environment was installed successfully ' - 'before adding new nodes. If you want a fresh install, use ' - '`atomic-openshift-installer install --force`') + message = """ +If you want to force reinstall of your environment, run: +`atomic-openshift-installer install --force` + +If you want to add new nodes to this environment, run: +`atomic-openshift-installer scaleup` +""" + click.echo(message) sys.exit(1) - else: - if unattended: - if not force: - click.echo('Installed environment detected and no additional ' - 'nodes specified: aborting. If you want a fresh install, use ' - '`atomic-openshift-installer install --force`') - sys.exit(1) - else: - if not force: - new_nodes = collect_new_nodes(oo_cfg) - - hosts_to_run_on.extend(new_nodes) - oo_cfg.deployment.hosts.extend(new_nodes) - - openshift_ansible.set_config(oo_cfg) - click.echo('Gathering information from hosts...') - callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose) - if error or callback_facts is None: - click.echo("There was a problem fetching the required information. 
See " - "{} for details.".format(oo_cfg.settings['ansible_log_path'])) - sys.exit(1) - else: - pass # proceeding as normal should do a clean install return hosts_to_run_on, callback_facts @@ -800,6 +770,49 @@ def set_infra_nodes(hosts): host.node_labels = "{'region': 'infra'}" +def run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory): + # Write Ansible inventory file to disk: + inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on) + + click.echo() + click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path) + click.echo("Wrote Ansible inventory: %s" % inventory_file) + click.echo() + + if gen_inventory: + sys.exit(0) + + click.echo('Ready to run installation process.') + message = """ +If changes are needed please edit the config file above and re-run. +""" + if not unattended: + confirm_continue(message) + + error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts, + hosts_to_run_on, verbose) + + if error: + # The bootstrap script will print out the log location. + message = """ +An error was detected. After resolving the problem please relaunch the +installation process. +""" + click.echo(message) + sys.exit(1) + else: + message = """ +The installation was successful! + +If this is your first time installing please take a look at the Administrator +Guide for advanced options related to routing, storage, authentication, and +more: + +http://docs.openshift.com/enterprise/latest/admin_guide/overview.html +""" + click.echo(message) + + @click.group() @click.pass_context @click.option('--unattended', '-u', is_flag=True, default=False) @@ -846,8 +859,8 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_ # highest), anything below that (we only use debug/warning # presently) is not logged. If '-d' is given though, we'll # lower the threshold to debug (almost everything gets through) - installer_log.setLevel(logging.DEBUG) - installer_log.debug("Quick Installer debugging initialized") + INSTALLER_LOG.setLevel(logging.DEBUG) + INSTALLER_LOG.debug("Quick Installer debugging initialized") ctx.obj = {} ctx.obj['unattended'] = unattended @@ -857,8 +870,8 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_ try: oo_cfg = OOConfig(ctx.obj['configuration']) - except OOConfigInvalidHostError as e: - click.echo(e) + except OOConfigInvalidHostError as err: + click.echo(err) sys.exit(1) # If no playbook dir on the CLI, check the config: @@ -916,7 +929,7 @@ def uninstall(ctx): @click.option('--latest-minor', '-l', is_flag=True, default=False) @click.option('--next-major', '-n', is_flag=True, default=False) @click.pass_context -# pylint: disable=too-many-statements +# pylint: disable=too-many-statements,too-many-branches def upgrade(ctx, latest_minor, next_major): oo_cfg = ctx.obj['oo_cfg'] @@ -969,7 +982,7 @@ def upgrade(ctx, latest_minor, next_major): sys.exit(0) playbook = mapping['major_playbook'] new_version = mapping['major_version'] - # Update config to reflect the version we're targetting, we'll write + # Update config to reflect the version we're targeting, we'll write # to disk once Ansible completes successfully, not before. 
oo_cfg.settings['variant_version'] = new_version if oo_cfg.settings['variant'] == 'enterprise': @@ -1013,15 +1026,17 @@ def upgrade(ctx, latest_minor, next_major): def install(ctx, force, gen_inventory): oo_cfg = ctx.obj['oo_cfg'] verbose = ctx.obj['verbose'] + unattended = ctx.obj['unattended'] - if ctx.obj['unattended']: + if unattended: error_if_missing_info(oo_cfg) else: oo_cfg = get_missing_info_from_user(oo_cfg) - check_hosts_config(oo_cfg, ctx.obj['unattended']) + check_hosts_config(oo_cfg, unattended) - print_installation_summary(oo_cfg.deployment.hosts, oo_cfg.settings.get('variant_version', None)) + print_installation_summary(oo_cfg.deployment.hosts, + oo_cfg.settings.get('variant_version', None)) click.echo('Gathering information from hosts...') callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose) @@ -1031,62 +1046,92 @@ def install(ctx, force, gen_inventory): "Please see {} for details.".format(oo_cfg.settings['ansible_log_path'])) sys.exit(1) - hosts_to_run_on, callback_facts = get_hosts_to_run_on( - oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose) + hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, + callback_facts, + unattended, + force) # We already verified this is not the case for unattended installs, so this can # only trigger for live CLI users: - # TODO: if there are *new* nodes and this is a live install, we may need the user - # to confirm the settings for new nodes. Look into this once we're distinguishing - # between new and pre-existing nodes. if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0: confirm_hosts_facts(oo_cfg, callback_facts) # Write quick installer config file to disk: oo_cfg.save_to_disk() - # Write Ansible inventory file to disk: - inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on) + run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory) - click.echo() - click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path) - click.echo("Wrote Ansible inventory: %s" % inventory_file) - click.echo() - if gen_inventory: - sys.exit(0) +@click.command() +@click.option('--gen-inventory', is_flag=True, default=False, + help="Generate an Ansible inventory file and exit.") +@click.pass_context +def scaleup(ctx, gen_inventory): + oo_cfg = ctx.obj['oo_cfg'] + verbose = ctx.obj['verbose'] + unattended = ctx.obj['unattended'] - click.echo('Ready to run installation process.') + installed_hosts = list(oo_cfg.deployment.hosts) + + if len(installed_hosts) == 0: + click.echo('No hosts specified.') + sys.exit(1) + + click.echo('Welcome to the OpenShift Enterprise 3 Scaleup utility.') + + print_installation_summary(installed_hosts, + oo_cfg.settings['variant_version'], + verbose=False,) message = """ -If changes are needed please edit the config file above and re-run. -""" - if not ctx.obj['unattended']: - confirm_continue(message) +--- - error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts, - hosts_to_run_on, verbose) +We have detected this previously installed OpenShift environment. - if error: - # The bootstrap script will print out the log location. - message = """ -An error was detected. After resolving the problem please relaunch the -installation process. +This tool will guide you through the process of adding additional +nodes to your cluster. 
""" - click.echo(message) + confirm_continue(message) + + error_if_missing_info(oo_cfg) + check_hosts_config(oo_cfg, True) + + installed_masters = [host for host in installed_hosts if host.is_master()] + new_nodes = collect_new_nodes(oo_cfg) + + oo_cfg.deployment.hosts.extend(new_nodes) + hosts_to_run_on = installed_masters + new_nodes + + openshift_ansible.set_config(oo_cfg) + click.echo('Gathering information from hosts...') + callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose) + if error or callback_facts is None: + click.echo("There was a problem fetching the required information. See " + "{} for details.".format(oo_cfg.settings['ansible_log_path'])) sys.exit(1) - else: - message = """ -The installation was successful! -If this is your first time installing please take a look at the Administrator -Guide for advanced options related to routing, storage, authentication, and -more: + print_installation_summary(oo_cfg.deployment.hosts, + oo_cfg.settings.get('variant_version', None)) + click.echo('Gathering information from hosts...') + callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, + verbose) + + if error or callback_facts is None: + click.echo("There was a problem fetching the required information. " + "Please see {} for details.".format(oo_cfg.settings['ansible_log_path'])) + sys.exit(1) + + # We already verified this is not the case for unattended installs, so this can + # only trigger for live CLI users: + if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0: + confirm_hosts_facts(oo_cfg, callback_facts) + + # Write quick installer config file to disk: + oo_cfg.save_to_disk() + run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory) -http://docs.openshift.com/enterprise/latest/admin_guide/overview.html -""" - click.echo(message) cli.add_command(install) +cli.add_command(scaleup) cli.add_command(upgrade) cli.add_command(uninstall) diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index 697ac9c08..e6bff7133 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -436,3 +436,11 @@ class OOConfig(object): if host.connect_to == name: return host return None + + def get_host_roles_set(self): + roles_set = set() + for host in self.deployment.hosts: + for role in host.roles: + roles_set.add(role) + + return roles_set diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 80a79a6d2..764cc1e56 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -48,9 +48,6 @@ def set_config(cfg): def generate_inventory(hosts): global CFG - masters = [host for host in hosts if host.is_master()] - multiple_masters = len(masters) > 1 - new_nodes = [host for host in hosts if host.is_node() and host.new_host] scaleup = len(new_nodes) > 0 @@ -61,7 +58,7 @@ def generate_inventory(hosts): write_inventory_children(base_inventory, scaleup) - write_inventory_vars(base_inventory, multiple_masters, lb) + write_inventory_vars(base_inventory, lb) # write_inventory_hosts for role in CFG.deployment.roles: @@ -106,7 +103,7 @@ def write_inventory_children(base_inventory, scaleup): # pylint: disable=too-many-branches -def write_inventory_vars(base_inventory, multiple_masters, lb): +def write_inventory_vars(base_inventory, lb): global CFG base_inventory.write('\n[OSEv3:vars]\n') @@ -123,7 +120,7 @@ def write_inventory_vars(base_inventory, multiple_masters, lb): 
if CFG.deployment.variables['ansible_ssh_user'] != 'root': base_inventory.write('ansible_become=yes\n') - if multiple_masters and lb is not None: + if lb is not None: base_inventory.write('openshift_master_cluster_method=native\n') base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname)) base_inventory.write( diff --git a/utils/src/ooinstall/utils.py b/utils/src/ooinstall/utils.py index eb27a57e4..85a77c75e 100644 --- a/utils/src/ooinstall/utils.py +++ b/utils/src/ooinstall/utils.py @@ -1,4 +1,6 @@ import logging +import re + installer_log = logging.getLogger('installer') @@ -8,3 +10,12 @@ def debug_env(env): if k.startswith("OPENSHIFT") or k.startswith("ANSIBLE") or k.startswith("OO"): installer_log.debug("{key}: {value}".format( key=k, value=env[k])) + + +def is_valid_hostname(hostname): + if not hostname or len(hostname) > 255: + return False + if hostname[-1] == ".": + hostname = hostname[:-1] # strip exactly one dot from the right, if present + allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) + return all(allowed.match(x) for x in hostname.split(".")) diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt index f2216a177..af91ab6a7 100644 --- a/utils/test-requirements.txt +++ b/utils/test-requirements.txt @@ -9,3 +9,4 @@ flake8 PyYAML click backports.functools_lru_cache +pyOpenSSL diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 34392777b..36dc18034 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -842,7 +842,7 @@ class AttendedCliTests(OOCliFixture): # interactive with config file and some installed some uninstalled hosts @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') - def test_add_nodes(self, load_facts_mock, run_playbook_mock): + def test_scaleup_hint(self, load_facts_mock, run_playbook_mock): # Modify the mock facts to return a version indicating OpenShift # is already installed on our master, and the first node. @@ -866,13 +866,12 @@ class AttendedCliTests(OOCliFixture): result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input) - self.assert_result(result, 0) - self._verify_load_facts(load_facts_mock) - self._verify_run_playbook(run_playbook_mock, 3, 2) + # This is testing the install workflow so we want to make sure we + # exit with the appropriate hint. 
+ self.assertTrue('scaleup' in result.output) + self.assert_result(result, 1) - written_config = read_yaml(self.config_file) - self._verify_config_hosts(written_config, 3) @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') @@ -897,30 +896,30 @@ class AttendedCliTests(OOCliFixture): written_config = read_yaml(config_file) self._verify_config_hosts(written_config, 3) - #interactive with config file and all installed hosts - @patch('ooinstall.openshift_ansible.run_main_playbook') - @patch('ooinstall.openshift_ansible.load_system_facts') - def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock): - mock_facts = copy.deepcopy(MOCK_FACTS) - mock_facts['10.0.0.1']['common']['version'] = "3.0.0" - mock_facts['10.0.0.2']['common']['version'] = "3.0.0" - - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False), - ], - add_nodes=[('10.0.0.2', False, False)], - ssh_user='root', - variant_num=1, - schedulable_masters_ok=True, - confirm_facts='y', - storage='10.0.0.1',) - - self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, - run_playbook_mock, - cli_input, - exp_hosts_len=2, - exp_hosts_to_run_on_len=2, - force=False) +# #interactive with config file and all installed hosts +# @patch('ooinstall.openshift_ansible.run_main_playbook') +# @patch('ooinstall.openshift_ansible.load_system_facts') +# def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock): +# mock_facts = copy.deepcopy(MOCK_FACTS) +# mock_facts['10.0.0.1']['common']['version'] = "3.0.0" +# mock_facts['10.0.0.2']['common']['version'] = "3.0.0" +# +# cli_input = build_input(hosts=[ +# ('10.0.0.1', True, False), +# ], +# add_nodes=[('10.0.0.2', False, False)], +# ssh_user='root', +# variant_num=1, +# schedulable_masters_ok=True, +# confirm_facts='y', +# storage='10.0.0.1',) +# +# self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, +# run_playbook_mock, +# cli_input, +# exp_hosts_len=2, +# exp_hosts_to_run_on_len=2, +# force=False) #interactive multimaster: one more node than master @patch('ooinstall.openshift_ansible.run_main_playbook') diff --git a/utils/test/fixture.py b/utils/test/fixture.py index a883e5c56..62135c761 100644 --- a/utils/test/fixture.py +++ b/utils/test/fixture.py @@ -138,8 +138,8 @@ class OOCliFixture(OOInstallFixture): written_config = read_yaml(config_file) self._verify_config_hosts(written_config, exp_hosts_len) - if "Uninstalled" in result.output: - # verify we exited on seeing uninstalled hosts + if "If you want to force reinstall" in result.output: + # verify we exited on seeing installed hosts self.assertEqual(result.exit_code, 1) else: self.assert_result(result, 0) @@ -156,7 +156,7 @@ class OOCliFixture(OOInstallFixture): #pylint: disable=too-many-arguments,too-many-branches,too-many-statements def build_input(ssh_user=None, hosts=None, variant_num=None, add_nodes=None, confirm_facts=None, schedulable_masters_ok=None, - master_lb=None, storage=None): + master_lb=('', False), storage=None): """ Build an input string simulating a user entering values in an interactive attended install. 
@@ -204,11 +204,11 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
         i += 1
 
     # You can pass a single master_lb or a list if you intend for one to get rejected:
-    if master_lb:
-        if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple):
-            inputs.extend(master_lb[0])
-        else:
-            inputs.append(master_lb[0])
+    if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple):
+        inputs.extend(master_lb[0])
+    else:
+        inputs.append(master_lb[0])
+    if master_lb[0]:
         inputs.append('y' if master_lb[1] else 'n')
 
     if storage:
@@ -248,6 +248,7 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
     inputs.extend([
         confirm_facts,
         'y',  # lets do this
+        'y',
     ])
 
     return '\n'.join(inputs)
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
new file mode 100644
index 000000000..8d59f388e
--- /dev/null
+++ b/utils/test/test_utils.py
@@ -0,0 +1,72 @@
+"""
+Unittests for ooinstall utils.
+"""
+
+import unittest
+import logging
+import sys
+import copy
+from ooinstall.utils import debug_env
+import mock
+
+
+class TestUtils(unittest.TestCase):
+    """
+    Parent unittest TestCase.
+    """
+
+    def setUp(self):
+        self.debug_all_params = {
+            'OPENSHIFT_FOO': 'bar',
+            'ANSIBLE_FOO': 'bar',
+            'OO_FOO': 'bar'
+        }
+
+        self.expected = [
+            mock.call('ANSIBLE_FOO: bar'),
+            mock.call('OPENSHIFT_FOO: bar'),
+            mock.call('OO_FOO: bar'),
+        ]
+
+        # python 2.x has assertItemsEqual, python 3.x has assertCountEqual
+        if sys.version_info.major > 2:
+            self.assertItemsEqual = self.assertCountEqual
+
+    ######################################################################
+    # Validate ooinstall.utils.debug_env functionality
+
+    def test_utils_debug_env_all_debugged(self):
+        """Verify debug_env debugs specific env variables"""
+
+        with mock.patch('ooinstall.utils.installer_log') as _il:
+            debug_env(self.debug_all_params)
+            print _il.debug.call_args_list
+
+            # Debug was called for each item we expect
+            self.assertEqual(
+                len(self.debug_all_params),
+                _il.debug.call_count)
+
+            # Each item we expect was logged
+            self.assertItemsEqual(
+                self.expected,
+                _il.debug.call_args_list)
+
+    def test_utils_debug_env_some_debugged(self):
+        """Verify debug_env skips non-wanted env variables"""
+        debug_some_params = copy.deepcopy(self.debug_all_params)
+        # This will not be logged by debug_env
+        debug_some_params['MG_FRBBR'] = "SKIPPED"
+
+        with mock.patch('ooinstall.utils.installer_log') as _il:
+            debug_env(debug_some_params)
+
+            # The actual number of debug calls was less than the
+            # number of items passed to debug_env
+            self.assertLess(
+                _il.debug.call_count,
+                len(debug_some_params))
+
+            self.assertItemsEqual(
+                self.expected,
+                _il.debug.call_args_list)
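
The scaleup flow introduced in cli_installer.py limits the playbook run to the already-installed masters plus the nodes being added; existing nodes are left untouched. The sketch below mirrors that selection rule with simplified stand-in host objects (Host, is_master, and the sample addresses are illustrative only, not the actual ooinstall classes):

    # Sketch of the host-selection rule used by the scaleup flow,
    # using simplified stand-ins for ooinstall Host objects.
    from collections import namedtuple

    Host = namedtuple("Host", ["connect_to", "roles", "new_host"])

    def is_master(host):
        return "master" in host.roles

    installed_hosts = [
        Host("10.0.0.1", ["master", "node"], False),
        Host("10.0.0.2", ["node"], False),
    ]
    new_nodes = [Host("10.0.0.3", ["node"], True)]

    # Playbooks run against installed masters plus the new nodes only;
    # already-installed non-master nodes are not revisited.
    hosts_to_run_on = [h for h in installed_hosts if is_master(h)] + new_nodes
    print([h.connect_to for h in hosts_to_run_on])  # ['10.0.0.1', '10.0.0.3']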
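The new OOConfig.get_host_roles_set() helper simply unions the role lists of every host in the deployment. An equivalent one-liner, shown here with plain dictionaries standing in for the deployment's host objects, is a set comprehension:

    # Equivalent of OOConfig.get_host_roles_set(), with plain dicts as
    # stand-ins for the deployment's host objects.
    hosts = [
        {"connect_to": "10.0.0.1", "roles": ["master", "etcd", "node"]},
        {"connect_to": "10.0.0.2", "roles": ["node"]},
    ]

    roles_set = {role for host in hosts for role in host["roles"]}
    print(sorted(roles_set))  # ['etcd', 'master', 'node']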
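The is_valid_hostname() helper added to utils/src/ooinstall/utils.py applies the usual RFC 1123 label check: each dot-separated label must be 1 to 63 characters of letters, digits, or hyphens, may not begin or end with a hyphen, and the full name may not exceed 255 characters. A minimal usage sketch, assuming the ooinstall package from utils/ is importable (for example after an editable install of that directory); the sample hostnames are illustrative only:

    # Sketch: exercising is_valid_hostname() from ooinstall.utils.
    from ooinstall.utils import is_valid_hostname

    assert is_valid_hostname("master.example.com")           # ordinary FQDN
    assert is_valid_hostname("node-1.example.com.")          # one trailing dot is tolerated
    assert not is_valid_hostname("")                         # empty string rejected
    assert not is_valid_hostname("-bad.example.com")         # label may not start with '-'
    assert not is_valid_hostname("under_score.example.com")  # '_' is not in the allowed set
    assert not is_valid_hostname("a" * 256)                  # total length capped at 255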