author     Thomas Wiest <twiest@users.noreply.github.com>  2015-12-14 12:47:51 -0500
committer  Thomas Wiest <twiest@users.noreply.github.com>  2015-12-14 12:47:51 -0500
commit     4dfe16e0e567a633cedd8ee56ffaed5110ca1629
tree       aa67c9a14ae30cf02177e53ae3e8d1e9c878f1c8
parent     eeb164fae0e6721100c4fcc1717d92bb85b9652c
parent     4322b19c0503a4f149ac5bca251beba14178948d
Merge pull request #1059 from twiest/master
sync master -> prod branch
111 files changed, 590 insertions(+), 482 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index b7dc52080..3cc7946d7 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.19-1 ./ +3.0.20-1 ./ diff --git a/README_libvirt.md b/README_libvirt.md index fd0250781..3e5df2dca 100644 --- a/README_libvirt.md +++ b/README_libvirt.md @@ -115,9 +115,10 @@ Configuration The following options can be passed via the `-o` flag of the `create` command or as environment variables: -* `image_url` (default to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2`): URL of the QCOW2 image to download +* `image_url` (default to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz`): URL of the QCOW2 image to download * `image_name` (default to `CentOS-7-x86_64-GenericCloud.qcow2`): Name of the QCOW2 image to boot the VMs on -* `image_sha256` (default to `e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab`): Expected SHA256 checksum of the downloaded image +* `image_compression` (default to `xz`): Source QCOW2 compression (only xz supported at this time) +* `image_sha256` (default to `9461006300d65172f5668d8875f2aad7b54f7ba4e9c5435d65a84a5a2d66e39b`): Expected SHA256 checksum of the downloaded image * `skip_image_download` (default to `no`): Skip QCOW2 image download. This requires the `image_name` QCOW2 image to be already present in `$HOME/libvirt-storage-pool-openshift-ansible` Creating a cluster diff --git a/README_origin.md b/README_origin.md index 343ecda3d..12e79791e 100644 --- a/README_origin.md +++ b/README_origin.md @@ -15,7 +15,7 @@ * There is currently a known issue with ansible-1.9.0, you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842 * Available in Fedora channels * Available for EL with EPEL and Optional channel -* One or more RHEL 7.1 or CentOS 7.1 VMs +* One or more RHEL 7.1+, CentOS 7.1+, or Fedora 23+ VMs * Either ssh key based auth for the root user or ssh key based auth for a user with sudo access (no password) * A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/ diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py index 45345007c..ba32b4dbd 100644 --- a/bin/openshift_ansible/awsutil.py +++ b/bin/openshift_ansible/awsutil.py @@ -58,7 +58,7 @@ class AwsUtil(object): def get_environments(self): """Searches for env tags in the inventory and returns all of the envs found.""" - pattern = re.compile(r'^tag_environment_(.*)') + pattern = re.compile(r'^tag_env_(.*)') envs = [] inv = self.get_inventory() @@ -106,13 +106,13 @@ class AwsUtil(object): inst_by_env = {} for _, host in inv['_meta']['hostvars'].items(): # If you don't have an environment tag, we're going to ignore you - if 'ec2_tag_environment' not in host: + if 'ec2_tag_env' not in host: continue - if host['ec2_tag_environment'] not in inst_by_env: - inst_by_env[host['ec2_tag_environment']] = {} + if host['ec2_tag_env'] not in inst_by_env: + inst_by_env[host['ec2_tag_env']] = {} host_id = "%s:%s" % (host['ec2_tag_Name'], host['ec2_id']) - inst_by_env[host['ec2_tag_environment']][host_id] = host + inst_by_env[host['ec2_tag_env']][host_id] = host return inst_by_env @@ -154,7 +154,7 @@ class AwsUtil(object): def gen_env_tag(env): """Generate the environment tag """ - return "tag_environment_%s" % env + return "tag_env_%s" % env def gen_host_type_tag(self, host_type): """Generate the host type 
tag @@ -138,7 +138,7 @@ class Oscp(object): # attempt to select the correct environment if specified if self.env: - results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results) + results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results) if results: return results @@ -167,7 +167,7 @@ class Oscp(object): name = server_info['ec2_tag_Name'] ec2_id = server_info['ec2_id'] ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) if limit: print @@ -180,7 +180,7 @@ class Oscp(object): name = server_info['ec2_tag_Name'] ec2_id = server_info['ec2_id'] ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) def scp(self): '''scp files to or from a specified host @@ -209,7 +209,7 @@ class Oscp(object): if len(results) > 1: print "Multiple results found for %s." % self.host for result in results: - print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1]) + print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1]) return # early exit, too many results # Assume we have one and only one. @@ -127,7 +127,7 @@ class Ossh(object): # attempt to select the correct environment if specified if self.env: - results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results) + results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results) if results: return results @@ -156,7 +156,7 @@ class Ossh(object): name = server_info['ec2_tag_Name'] ec2_id = server_info['ec2_id'] ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) if limit: print @@ -169,7 +169,7 @@ class Ossh(object): name = server_info['ec2_tag_Name'] ec2_id = server_info['ec2_id'] ip = server_info['ec2_ip_address'] - print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) + print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info) def ssh(self): '''SSH to a specified host @@ -195,7 +195,7 @@ class Ossh(object): if len(results) > 1: print "Multiple results found for %s." % self.host for result in results: - print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1]) + print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1]) return # early exit, too many results # Assume we have one and only one. 
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion index 997ff0f9c..440fa0a45 100755 --- a/bin/ossh_bash_completion +++ b/bin/ossh_bash_completion @@ -1,12 +1,12 @@ __ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])' elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then - /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])' elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then - /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])' fi } diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion index 3c4018636..f9454357b 100644 --- a/bin/ossh_zsh_completion +++ b/bin/ossh_zsh_completion @@ -2,13 +2,13 @@ _ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])') elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", 
"ec2_tag_environment"))])') + print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])') elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])') fi diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh index d205e1055..e34ca5bd4 100644 --- a/bin/zsh_functions/_ossh +++ b/bin/zsh_functions/_ossh @@ -2,7 +2,7 @@ _ossh_known_hosts(){ if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') + print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items()])') fi } diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc index 08d95b2b8..6b744333c 100644 --- a/docs/best_practices_guide.adoc +++ b/docs/best_practices_guide.adoc @@ -466,3 +466,50 @@ If you want to use default with variables that evaluate to false you have to set In other words, normally the `default` filter will only replace the value if it's undefined. By setting the second parameter to `true`, it will also replace the value if it defaults to a false value in python, so None, empty list, empty string, etc. This is almost always more desirable than an empty list, string, etc. + +=== Yum and DNF +''' +[cols="2v,v"] +|=== +| **Rule** +| Package installation MUST use ansible action module to abstract away dnf/yum. +| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively. +|=== +[cols="2v,v"] +|=== +| **Rule** +| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively. +|=== + +This is done primarily because if you're registering the result of the +installation and you have two conditional tasks based on whether or not yum or +dnf are in use you'll end up inadvertently overwriting the value. It also +reduces duplication. name= and state=present are common between dnf and yum +modules. 
+ +.Bad: +[source,yaml] +---- +--- +# tasks.yml +- name: Install etcd (for etcdctl) + yum: name=etcd state=latest" + when: "ansible_pkg_mgr == yum" + register: install_result + +- name: Install etcd (for etcdctl) + dnf: name=etcd state=latest" + when: "ansible_pkg_mgr == dnf" + register: install_result +---- + + +.Good: +[source,yaml] +---- +--- +# tasks.yml +- name: Install etcd (for etcdctl) + action: "{{ ansible_pkg_mgr }} name=etcd state=latest" + register: install_result + ---- diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 1a854f637..48e27a24a 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -433,6 +433,7 @@ class FilterModule(object): ''' for tag in tags: # Skip tag_env-host-type to avoid ambiguity with tag_env + # Removing env-host-type tag but leaving this here if tag[:17] == 'tag_env-host-type': continue if tag[:len(key)+4] == 'tag_' + key: diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example index d5b872e06..096d806a3 100644 --- a/inventory/byo/hosts.aep.example +++ b/inventory/byo/hosts.aep.example @@ -21,6 +21,9 @@ ansible_ssh_user=root # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise deployment_type=atomic-enterprise +# Install the openshift examples +#openshift_install_examples=true + # Enable cluster metrics #use_cluster_metrics=true diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 77a3a04b4..6f015c404 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -21,6 +21,9 @@ ansible_ssh_user=root # deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise deployment_type=origin +# Install the openshift examples +#openshift_install_examples=true + # Enable cluster metrics #use_cluster_metrics=true diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 5a4310298..778bbfb3a 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -21,6 +21,9 @@ ansible_ssh_user=root # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise deployment_type=openshift-enterprise +# Install the openshift examples +#openshift_install_examples=true + # Enable cluster metrics #use_cluster_metrics=true diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 4f3cdbed1..563ea3cae 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.19 +Version: 3.0.20 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -259,6 +259,22 @@ Atomic OpenShift Utilities includes %changelog +* Thu Dec 10 2015 Thomas Wiest <twiest@redhat.com> 3.0.20-1 +- Revert "Automatic commit of package [openshift-ansible] release [3.0.20-1]." + (twiest@redhat.com) +- Automatic commit of package [openshift-ansible] release [3.0.20-1]. + (twiest@redhat.com) +- Install base package in openshift_common for version facts + (abutcher@redhat.com) +- Make the install of openshift_examples optional (jtslear@gmail.com) +- add support for remote command actions no support for anything but custom + scripts at this time (jdiaz@redhat.com) +- Remove yum / dnf duplication (sdodson@redhat.com) +- Remove hacluster user during uninstall. (abutcher@redhat.com) +- Simplify session secrets overrides. (abutcher@redhat.com) +- Squash pcs install into one task. 
(abutcher@redhat.com) +- Bump ansible requirement to 1.9.4 (sdodson@redhat.com) + * Wed Dec 09 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.19-1 - Fix version dependent image streams (sdodson@redhat.com) - atomic-openshift-installer: Error handling on yaml loading diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 4f0ef7a75..0ca040ee1 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -1,20 +1,22 @@ --- -#example run: +#example run: # ansible-playbook -e "cli_volume_size=1" \ # -e "cli_device_name=/dev/xvdf" \ # -e "cli_hosttype=master" \ -# -e "cli_environment=ops" \ +# -e "cli_env=ops" \ # create_pv.yaml -# FIXME: we need to change "environment" to "clusterid" as that's what it really is now. +# FIXME: we need to change "env" to "clusterid" as that's what it really is now. # - name: Create a volume and attach it to master hosts: localhost + connection: local + become: no gather_facts: no vars: cli_volume_type: gp2 cli_volume_iops: '' oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] | - intersect(groups['tag_environment_' ~ cli_environment]) | + intersect(groups['tag_env_' ~ cli_env]) | first }}" pre_tasks: - fail: @@ -24,7 +26,7 @@ - cli_volume_size - cli_device_name - cli_hosttype - - cli_environment + - cli_env - name: set oo_name fact set_fact: @@ -55,7 +57,7 @@ args: tags: Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}" - env: "{{cli_environment}}" + env: "{{cli_env}}" register: voltags - debug: var=voltags @@ -103,7 +105,7 @@ filesystem: dev: "{{ cli_device_name }}" fstype: ext4 - + - name: Mount the dev mount: name: "{{ pv_mntdir }}" @@ -112,7 +114,7 @@ state: mounted - name: chgrp g+rwXs - file: + file: path: "{{ pv_mntdir }}" mode: 'g+rwXs' recurse: yes @@ -154,6 +156,6 @@ - debug: var=oc_output - - fail: + - fail: msg: "Failed to add {{ pv_template }} to master." 
when: oc_output.rc != 0 diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml index b6a2d2f26..89128dd3c 100644 --- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -113,7 +113,7 @@ args: tags: Name: "{{ ec2_tag_Name }}" - env: "{{ ec2_tag_environment }}" + env: "{{ ec2_tag_env}}" register: voltags - name: Wait for volume to attach diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml index 63d473146..b4bcb25da 100644 --- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -151,7 +151,7 @@ args: tags: Name: "{{ ec2_tag_Name }}" - env: "{{ ec2_tag_environment }}" + env: "{{ ec2_tag_env }}" register: voltags - name: check for attached drive diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml index d250e6e69..2d2cae2b5 100644 --- a/playbooks/adhoc/noc/create_host.yml +++ b/playbooks/adhoc/noc/create_host.yml @@ -1,6 +1,8 @@ --- - name: 'Create a host object in zabbix' hosts: localhost + connection: local + become: no gather_facts: no roles: - os_zabbix @@ -23,6 +25,8 @@ #ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml - name: 'Create a host object in zabbix' hosts: localhost + connection: local + become: no gather_facts: no roles: - os_zabbix diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml index c0ec57ce1..8ad5fa0e2 100644 --- a/playbooks/adhoc/noc/create_maintenance.yml +++ b/playbooks/adhoc/noc/create_maintenance.yml @@ -2,6 +2,8 @@ #ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml - name: 'Create a maintenace object in zabbix' hosts: localhost + connection: local + become: no gather_facts: no roles: - os_zabbix diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml index 4b94fa228..79cae24ab 100644 --- a/playbooks/adhoc/noc/get_zabbix_problems.yml +++ b/playbooks/adhoc/noc/get_zabbix_problems.yml @@ -1,6 +1,8 @@ --- - name: 'Get current hosts who have triggers that are alerting by trigger description' hosts: localhost + connection: local + become: no gather_facts: no roles: - os_zabbix diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml index 4dcef1a42..071c2cf46 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.yml +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -6,7 +6,7 @@ # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role. # The 'clusterid' is the short name of your cluster. 
-- hosts: tag_env-host-type_{{ clusterid }}-openshift-master +- hosts: tag_env_{{ clusterid }}:&tag_host-type_openshift-master remote_user: root gather_facts: False diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 08a2ea6fb..9161076e5 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -47,40 +47,8 @@ - origin-node - pcsd - - yum: name={{ item }} state=absent - when: ansible_pkg_mgr == "yum" and not is_atomic | bool - with_items: - - atomic-enterprise - - atomic-enterprise-master - - atomic-enterprise-node - - atomic-enterprise-sdn-ovs - - atomic-openshift - - atomic-openshift-clients - - atomic-openshift-master - - atomic-openshift-node - - atomic-openshift-sdn-ovs - - corosync - - etcd - - openshift - - openshift-master - - openshift-node - - openshift-sdn - - openshift-sdn-ovs - - openvswitch - - origin - - origin-clients - - origin-master - - origin-node - - origin-sdn-ovs - - pacemaker - - pcs - - tuned-profiles-atomic-enterprise-node - - tuned-profiles-atomic-openshift-node - - tuned-profiles-openshift-node - - tuned-profiles-origin-node - - - dnf: name={{ item }} state=absent - when: ansible_pkg_mgr == "dnf" and not is_atomic | bool + - action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" + when: not is_atomic | bool with_items: - atomic-enterprise - atomic-enterprise-master diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml index 1e884240a..09f7c76cc 100644 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -1,6 +1,8 @@ --- - hosts: localhost gather_facts: no + connection: local + become: no vars: g_server: http://localhost:8080/zabbix/api_jsonrpc.php g_user: '' diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml index e2b8150c6..ec28564cf 100755 --- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml +++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml @@ -2,6 +2,8 @@ --- - hosts: localhost gather_facts: no + connection: local + become: no vars: g_server: http://localhost/zabbix/api_jsonrpc.php g_user: Admin diff --git a/playbooks/aws/ansible-tower/config.yml b/playbooks/aws/ansible-tower/config.yml index efd1b9911..eb3f1a1da 100644 --- a/playbooks/aws/ansible-tower/config.yml +++ b/playbooks/aws/ansible-tower/config.yml @@ -2,6 +2,8 @@ - name: "populate oo_hosts_to_config host group if needed" hosts: localhost gather_facts: no + connection: local + become: no tasks: - name: Evaluate oo_host_group_exp if it's set add_host: "name={{ item }} groups=oo_hosts_to_config" diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml index 850238ffb..d40529435 100644 --- a/playbooks/aws/ansible-tower/launch.yml +++ b/playbooks/aws/ansible-tower/launch.yml @@ -2,6 +2,7 @@ - name: Launch instance(s) hosts: localhost connection: local + become: no gather_facts: no vars: @@ -71,8 +72,8 @@ tasks: - - name: Yum update - yum: name=* state=latest + - name: Update All Things + action: "{{ ansible_pkg_mgr }} name=* state=latest" # Apply the configs, seprate so that just the configs can be run by themselves - include: config.yml diff --git a/playbooks/aws/openshift-cluster/addNodes.yml b/playbooks/aws/openshift-cluster/addNodes.yml index fff3e401b..3d88e6b23 100644 --- a/playbooks/aws/openshift-cluster/addNodes.yml +++ b/playbooks/aws/openshift-cluster/addNodes.yml @@ -2,6 +2,7 @@ - name: Launch instance(s) hosts: localhost 
connection: local + become: no gather_facts: no vars_files: - vars.yml diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index 5aa6b0f9b..50fe42d6c 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -1,6 +1,8 @@ --- - hosts: localhost gather_facts: no + connection: local + become: no vars_files: - vars.yml tasks: @@ -10,10 +12,10 @@ - include: ../../common/openshift-cluster/config.yml vars: - g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" + g_etcd_hosts: "{{ (groups['tag_host-type_etcd']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_lb_hosts: "{{ (groups['tag_host-type_lb']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_node_hosts: "{{ (groups['tag_host-type_node']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" g_nodeonmaster: true diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index 09bf34666..15b83dfad 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -2,6 +2,7 @@ - name: Launch instance(s) hosts: localhost connection: local + become: no gather_facts: no vars_files: - vars.yml diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index 04fcdc0a1..8341ba9c1 100644 --- a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -2,6 +2,8 @@ - name: Generate oo_list_hosts group hosts: localhost gather_facts: no + connection: local + become: no vars_files: - vars.yml tasks: diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml index 4415700a3..9c9118286 100644 --- a/playbooks/aws/openshift-cluster/scaleup.yml +++ b/playbooks/aws/openshift-cluster/scaleup.yml @@ -2,6 +2,8 @@ - hosts: localhost gather_facts: no + connection: local + become: no vars_files: - vars.yml tasks: @@ -20,10 +22,10 @@ - include: ../../common/openshift-cluster/scaleup.yml vars: - g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" - g_new_nodes_group: 'nodes_to_add' + g_etcd_hosts: "{{ (groups['tag_host-type_etcd']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_lb_hosts: "{{ (groups['tag_host-type_lb']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_new_node_hosts: "{{ groups.nodes_to_add }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" g_nodeonmaster: true diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml index 
25cf48505..ce0992a45 100644 --- a/playbooks/aws/openshift-cluster/service.yml +++ b/playbooks/aws/openshift-cluster/service.yml @@ -1,6 +1,8 @@ --- - name: Call same systemctl command for openshift on all instance(s) hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml @@ -14,7 +16,7 @@ groups: g_service_masters ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]) + with_items: "{{ g_master_hosts | default([]) }}" - name: Evaluate g_service_nodes add_host: @@ -22,7 +24,7 @@ groups: g_service_nodes ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]) + with_items: "{{ g_node_hosts | default([]) }}" - include: ../../common/openshift-node/service.yml - include: ../../common/openshift-master/service.yml diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 99f0577fc..1fbd71a75 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -3,7 +3,6 @@ created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}" env: "{{ cluster }}" - env_host_type: "{{ cluster }}-openshift-{{ type }}" host_type: "{{ type }}" sub_host_type: "{{ g_sub_host_type }}" @@ -124,10 +123,8 @@ wait: yes instance_tags: created-by: "{{ created_by }}" - environment: "{{ env }}" env: "{{ env }}" host-type: "{{ host_type }}" - env-host-type: "{{ env_host_type }}" sub-host-type: "{{ sub_host_type }}" volumes: "{{ volumes }}" register: ec2 @@ -142,9 +139,7 @@ Name: "{{ item.0 }}" - set_fact: - instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }}, - tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}, - tag_sub-host-type_{{ sub_host_type }}" + instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}" - set_fact: node_label: diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 77287cad0..aafd40c43 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -1,6 +1,8 @@ --- - name: Terminate instance(s) hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml @@ -25,6 +27,7 @@ - name: Terminate instances hosts: localhost connection: local + become: no gather_facts: no vars: host_vars: "{{ hostvars @@ -36,7 +39,6 @@ tags: env: "{{ item['ec2_tag_env'] }}" host-type: "{{ item['ec2_tag_host-type'] }}" - env-host-type: "{{ item['ec2_tag_env-host-type'] }}" sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}" with_items: host_vars when: "'oo_hosts_to_terminate' in groups" diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml index e006aa74a..3df0c3f3a 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -1,19 +1,24 @@ --- -- name: Populate oo_hosts_to_update group +- name: Update - Populate oo_hosts_to_update group hosts: localhost + connection: local + become: 
no gather_facts: no + vars: + g_etcd_hosts: "{{ (groups['tag_host-type_etcd']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}" + g_lb_hosts: "{{ (groups['tag_host-type_lb']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}" + g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}" + g_node_hosts: "{{ (groups['tag_host-type_node']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" vars_files: - vars.yml tasks: - - name: Evaluate oo_hosts_to_update + - name: Update - Evaluate oo_hosts_to_update add_host: name: "{{ item }}" groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])) - | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])) - | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([])) + with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index 8cad51b5e..20cc97c8a 100644 --- a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -19,10 +19,10 @@ - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml vars: - g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ tmp_nodes_group | default('') }}" + g_etcd_hosts: "{{ (groups['tag_host-type_etcd']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_lb_hosts: "{{ (groups['tag_host-type_lb']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_node_hosts: "{{ (groups['tag_host-type_node']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" g_nodeonmaster: true diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 411c7e660..ba8fe0a52 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -1,10 +1,10 @@ --- - include: ../../common/openshift-cluster/config.yml vars: - g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_nodes_group: "{{ 'nodes' }}" - g_lb_group: "{{ 'lb' }}" + g_etcd_hosts: "{{ groups.etcd | default([]) }}" + g_master_hosts: "{{ groups.masters | default([]) }}" + g_node_hosts: "{{ groups.nodes | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/scaleup.yml b/playbooks/byo/openshift-cluster/scaleup.yml index 70644d427..8f8ef6f21 100644 --- a/playbooks/byo/openshift-cluster/scaleup.yml +++ 
b/playbooks/byo/openshift-cluster/scaleup.yml @@ -1,10 +1,10 @@ --- - include: ../../common/openshift-cluster/scaleup.yml vars: - g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_new_nodes_group: "{{ 'new_nodes' }}" - g_lb_group: "{{ 'lb' }}" + g_etcd_hosts: "{{ groups.etcd | default([]) }}" + g_master_hosts: "{{ groups.masters | default([]) }}" + g_new_node_hosts: "{{ groups.new_nodes | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml index 76fa9ba22..56e79e8c2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -1,9 +1,9 @@ --- - include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml vars: - g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_nodes_group: "{{ 'nodes' }}" - g_lb_group: "{{ 'lb' }}" + g_etcd_hosts: "{{ groups.etcd | default([]) }}" + g_master_hosts: "{{ groups.masters | default([]) }}" + g_node_hosts: "{{ groups.nodes | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index b06442366..b4b4f3ec0 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -1,9 +1,9 @@ --- - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml vars: - g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_nodes_group: "{{ 'nodes' }}" - g_lb_group: "{{ 'lb' }}" + g_etcd_hosts: "{{ groups.etcd | default([]) }}" + g_master_hosts: "{{ groups.masters | default([]) }}" + g_node_hosts: "{{ groups.nodes | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 34da372a4..6343a2567 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -1,23 +1,25 @@ --- - name: Populate config host groups hosts: localhost + connection: local + become: no gather_facts: no tasks: - fail: - msg: This playbook requires g_etcd_group to be set - when: g_etcd_group is not defined + msg: This playbook requires g_etcd_hosts to be set + when: g_etcd_hosts is not defined - fail: - msg: This playbook requires g_masters_group to be set - when: g_masters_group is not defined + msg: This playbook requires g_master_hosts to be set + when: g_master_hosts is not defined - fail: - msg: This playbook requires g_nodes_group or g_new_nodes_group to be set - when: g_nodes_group is not defined and g_new_nodes_group is not defined + msg: This playbook requires g_node_hosts or g_new_node_hosts to be set + when: g_node_hosts is not defined and g_new_node_hosts is not defined - fail: - msg: This playbook requires g_lb_group to be set - when: g_lb_group is not defined + msg: 
This playbook requires g_lb_hosts to be set + when: g_lb_hosts is not defined - name: Evaluate oo_etcd_to_config add_host: @@ -25,7 +27,7 @@ groups: oo_etcd_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_etcd_group] | default([]) + with_items: "{{ g_etcd_hosts | default([]) }}" - name: Evaluate oo_masters_to_config add_host: @@ -33,11 +35,11 @@ groups: oo_masters_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_masters_group] | default([]) + with_items: "{{ g_master_hosts | default([]) }}" - # Use g_new_nodes_group if it exists otherwise g_nodes_group + # Use g_new_node_hosts if it exists otherwise g_node_hosts - set_fact: - g_nodes_to_config: "{{ g_new_nodes_group | default(g_nodes_group | default([])) }}" + g_node_hosts_to_config: "{{ g_new_node_hosts | default(g_node_hosts | default([])) }}" - name: Evaluate oo_nodes_to_config add_host: @@ -45,32 +47,32 @@ groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_nodes_to_config] | default([]) + with_items: "{{ g_node_hosts_to_config | default([]) }}" - # Skip adding the master to oo_nodes_to_config when g_new_nodes_group is + # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is - name: Evaluate oo_nodes_to_config add_host: name: "{{ item }}" groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_masters_group] | default([]) - when: g_nodeonmaster | default(false) == true and g_new_nodes_group is not defined + with_items: "{{ g_master_hosts | default([]) }}" + when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined - name: Evaluate oo_first_etcd add_host: - name: "{{ groups[g_etcd_group][0] }}" + name: "{{ g_etcd_hosts[0] }}" groups: oo_first_etcd ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0 + when: g_etcd_hosts|length > 0 - name: Evaluate oo_first_master add_host: - name: "{{ groups[g_masters_group][0] }}" + name: "{{ g_master_hosts[0] }}" groups: oo_first_master ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 + when: g_master_hosts|length > 0 - name: Evaluate oo_lb_to_config add_host: @@ -78,4 +80,4 @@ groups: oo_lb_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_lb_group] | default([]) + with_items: "{{ g_lb_hosts | default([]) }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml index 9f7e49b93..63c8ef756 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -11,7 +11,7 @@ openshift_version: "{{ openshift_pkg_version | default('') }}" tasks: - name: Upgrade master packages - yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest" - name: Restart master services service: name="{{ openshift.common.service_type}}-master" 
state=restarted @@ -21,7 +21,7 @@ openshift_version: "{{ openshift_pkg_version | default('') }}" tasks: - name: Upgrade node packages - yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest" - name: Restart node services service: name="{{ openshift.common.service_type }}-node" state=restarted diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index 0309e8a77..fc098b4ed 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -12,6 +12,8 @@ - name: Evaluate additional groups for upgrade hosts: localhost + connection: local + become: no tasks: - name: Evaluate etcd_hosts_to_backup add_host: @@ -54,8 +56,8 @@ - name: Verify upgrade can proceed hosts: oo_masters_to_config:oo_nodes_to_config tasks: - - name: Clean yum cache - command: yum clean all + - name: Clean package cache + command: "{{ ansible_pkg_mgr }} clean all" - set_fact: g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" @@ -87,6 +89,8 @@ ############################################################################## - name: Gate on pre-upgrade checks hosts: localhost + connection: local + become: no vars: pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}" tasks: @@ -149,9 +153,7 @@ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) - name: Install etcd (for etcdctl) - yum: - pkg: etcd - state: latest + action: "{{ ansible_pkg_mgr }} name=etcd state=latest" - name: Generate etcd backup command: > @@ -171,6 +173,8 @@ ############################################################################## - name: Gate on etcd backup hosts: localhost + connection: local + become: no tasks: - set_fact: etcd_backup_completed: "{{ hostvars @@ -189,6 +193,8 @@ ############################################################################### - name: Create temp directory for syncing certs hosts: localhost + connection: local + become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -222,17 +228,13 @@ openshift_version: "{{ openshift_pkg_version | default('') }}" tasks: - name: Upgrade to latest available kernel - yum: - pkg: kernel - state: latest + action: "{{ ansible_pkg_mgr}} name=kernel state=latest" - name: Upgrade master packages - command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }} + command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}" - name: Ensure python-yaml present for config upgrade - yum: - pkg: PyYAML - state: installed + action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" - name: Upgrade master configuration openshift_upgrade_config: @@ -339,6 +341,8 @@ - name: Delete temporary directory on localhost hosts: localhost + connection: local + become: no gather_facts: no tasks: - file: name={{ g_master_mktemp.stdout }} state=absent @@ -357,6 +361,8 @@ ############################################################################## - name: Gate on master update hosts: localhost + connection: local + become: no tasks: - set_fact: master_update_completed: "{{ hostvars @@ -380,7 +386,7 @@ - openshift_facts tasks: - name: Upgrade node packages - command: yum 
update -y {{ openshift.common.service_type }}-node{{ openshift_version }} + command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}" - name: Restart node service service: name="{{ openshift.common.service_type }}-node" state=restarted @@ -397,6 +403,8 @@ ############################################################################## - name: Gate on nodes update hosts: localhost + connection: local + become: no tasks: - set_fact: node_update_completed: "{{ hostvars @@ -464,6 +472,8 @@ ############################################################################## - name: Gate on reconcile hosts: localhost + connection: local + become: no tasks: - set_fact: reconcile_completed: "{{ hostvars diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 7d94ced2e..6dee196e3 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -33,7 +33,7 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -92,7 +92,7 @@ - name: Delete temporary directory on localhost hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - file: name={{ g_etcd_mktemp.stdout }} state=absent diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml index 0bf69b22f..fd2bc24ae 100644 --- a/playbooks/common/openshift-etcd/service.yml +++ b/playbooks/common/openshift-etcd/service.yml @@ -1,6 +1,8 @@ --- - name: Populate g_service_masters host group if needed hosts: localhost + connection: local + become: no gather_facts: no tasks: - fail: msg="new_cluster_state is required to be injected in this playbook" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 196cdc8fe..dd638487a 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -70,7 +70,7 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -207,7 +207,7 @@ - name: Compute haproxy_backend_servers hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - set_fact: @@ -245,7 +245,7 @@ msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length" when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length) - name: Install OpenSSL package - action: "{{ansible_pkg_mgr}} pkg=openssl state=present" + action: "{{ ansible_pkg_mgr }} name=openssl state=present" - name: Generate session authentication key command: /usr/bin/openssl rand -base64 24 register: session_auth_output @@ -260,6 +260,8 @@ - name: Parse named certificates hosts: localhost + connection: local + become: no vars: internal_hostnames: "{{ hostvars[groups.oo_first_master.0].openshift.common.internal_hostnames }}" named_certificates: "{{ hostvars[groups.oo_first_master.0].openshift_master_named_certificates | default([]) }}" @@ -341,7 +343,8 @@ roles: - role: openshift_master_cluster when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - - 
openshift_examples + - role: openshift_examples + when: openshift.common.install_examples | bool - role: openshift_cluster_metrics when: openshift.common.use_cluster_metrics | bool - role: openshift_manageiq @@ -374,7 +377,7 @@ - name: Delete temporary directory on localhost hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - file: name={{ g_master_mktemp.stdout }} state=absent diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml index 27e1e66f9..f60c5a2b5 100644 --- a/playbooks/common/openshift-master/service.yml +++ b/playbooks/common/openshift-master/service.yml @@ -2,6 +2,8 @@ - name: Populate g_service_masters host group if needed hosts: localhost gather_facts: no + connection: local + become: no tasks: - fail: msg="new_cluster_state is required to be injected in this playbook" when: new_cluster_state is not defined diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 952a9fd51..69ccb0cb8 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -58,7 +58,7 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -191,7 +191,7 @@ - name: Delete temporary directory on localhost hosts: localhost connection: local - sudo: false + become: no gather_facts: no tasks: - file: name={{ mktemp.stdout }} state=absent diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml index 5cf83e186..0f07add2a 100644 --- a/playbooks/common/openshift-node/service.yml +++ b/playbooks/common/openshift-node/service.yml @@ -1,6 +1,8 @@ --- - name: Populate g_service_nodes host group if needed hosts: localhost + connection: local + become: no gather_facts: no tasks: - fail: msg="new_cluster_state is required to be injected in this playbook" diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 745161bcb..5bf98c2d5 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -4,6 +4,8 @@ - hosts: localhost gather_facts: no + connection: local + become: no vars_files: - vars.yml tasks: @@ -15,10 +17,10 @@ - include: ../../common/openshift-cluster/config.yml vars: - g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}" + g_etcd_hosts: "{{ (groups['tag_host-type-etcd']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" + g_lb_hosts: "{{ (groups['tag_host-type-lb']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" + g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" + g_node_hosts: "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" g_nodeonmaster: true diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml index 5ae3a8fef..ab593b897 100644 --- 
a/playbooks/gce/openshift-cluster/join_node.yml +++ b/playbooks/gce/openshift-cluster/join_node.yml @@ -1,7 +1,14 @@ --- - name: Populate oo_hosts_to_update group hosts: localhost + connection: local + become: no gather_facts: no + vars: + g_etcd_hosts: "{{ (groups['tag_host-type-etcd']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}" + g_lb_hosts: "{{ (groups['tag_host-type-lb']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}" + g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}" + g_node_hosts: "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" vars_files: - vars.yml tasks: @@ -16,6 +23,8 @@ - name: Populate oo_masters_to_config host group hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml @@ -29,11 +38,11 @@ - name: Evaluate oo_first_master add_host: - name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" + name: "{{ g_master_hosts | first }}" ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: oo_first_master - when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + when: g_master_hosts is defined and g_master_hosts|length > 0 #- include: config.yml - include: ../../common/openshift-node/config.yml diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index d6ef57c45..562bf8d29 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -2,6 +2,7 @@ - name: Launch instance(s) hosts: localhost connection: local + become: no gather_facts: no vars_files: - vars.yml diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index 53b2b9a5e..b9ff89c79 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -1,6 +1,8 @@ --- - name: Generate oo_list_hosts group hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml index 2d0f2ab95..337ba7e44 100644 --- a/playbooks/gce/openshift-cluster/service.yml +++ b/playbooks/gce/openshift-cluster/service.yml @@ -1,6 +1,8 @@ --- - name: Call same systemctl command for openshift on all instance(s) hosts: localhost + connection: local + become: no gather_facts: no vars_files: - vars.yml @@ -8,21 +10,19 @@ - fail: msg="cluster_id is required to be injected in this playbook" when: cluster_id is not defined - - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node - add_host: name: "{{ item }}" groups: g_service_nodes ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + with_items: "{{ g_node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}" - - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master - add_host: name: "{{ item }}" groups: g_service_masters ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: 
groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + with_items: "{{ g_master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}" - include: ../../common/openshift-node/service.yml - include: ../../common/openshift-master/service.yml diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index de8a75b18..2360a3263 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -19,7 +19,6 @@ - env-{{ cluster }} - host-type-{{ type }} - sub-host-type-{{ g_sub_host_type }} - - env-host-type-{{ cluster }}-openshift-{{ type }} when: instances |length > 0 register: gce diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index e20e0a8bc..f4e89983b 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -2,6 +2,7 @@ - name: Terminate instance(s) hosts: localhost connection: local + become: no gather_facts: no vars_files: - vars.yml @@ -27,6 +28,7 @@ - name: Terminate instances(s) hosts: localhost + become: no connection: local gather_facts: no vars_files: diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 8096aa654..d60662397 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -1,7 +1,14 @@ --- - name: Populate oo_hosts_to_update group hosts: localhost + become: no + connection: local gather_facts: no + vars: + g_etcd_hosts: "{{ (groups['tag_host-type-etcd']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}" + g_lb_hosts: "{{ (groups['tag_host-type-lb']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}" + g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}" + g_node_hosts: "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" vars_files: - vars.yml tasks: @@ -11,9 +18,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])) - | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])) - | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([])) + with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/gce/openshift-cluster/wip.yml b/playbooks/gce/openshift-cluster/wip.yml index 51a521a6b..0e3757546 100644 --- a/playbooks/gce/openshift-cluster/wip.yml +++ b/playbooks/gce/openshift-cluster/wip.yml @@ -1,6 +1,7 @@ --- - name: WIP hosts: localhost + become: no connection: local gather_facts: no vars_files: @@ -12,7 +13,7 @@ groups: oo_masters_for_deploy ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) + with_items: "{{ g_master_hosts | default([]) }}" - name: Deploy OpenShift Services hosts: 
oo_masters_for_deploy diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index 4d1ae22ff..b84bde084 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -5,6 +5,8 @@ - hosts: localhost gather_facts: no + become: no + connection: local vars_files: - vars.yml tasks: @@ -14,10 +16,10 @@ - include: ../../common/openshift-cluster/config.yml vars: - g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}" + g_etcd_hosts: "{{ (groups['tag_host-type-etcd']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" + g_lb_hosts: "{{ (groups['tag_host-type-lb']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" + g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" + g_node_hosts: "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index 8d7949dd1..3a48c82bc 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -1,6 +1,8 @@ --- - name: Launch instance(s) hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml @@ -11,6 +13,7 @@ image_url: "{{ deployment_vars[deployment_type].image.url }}" image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}" image_name: "{{ deployment_vars[deployment_type].image.name }}" + image_compression: "{{ deployment_vars[deployment_type].image.compression }}" tasks: - fail: msg="Deployment type not supported for libvirt provider yet" when: deployment_type == 'online' diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index 5954bb01e..d89e699f2 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -1,6 +1,8 @@ --- - name: Generate oo_list_hosts group hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml @@ -21,6 +23,8 @@ - name: List Hosts hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml index ae095f5a2..cd07c8701 100644 --- a/playbooks/libvirt/openshift-cluster/service.yml +++ b/playbooks/libvirt/openshift-cluster/service.yml @@ -5,6 +5,8 @@ - name: Call same systemctl command for openshift on all instance(s) hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml @@ -18,7 +20,7 @@ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: g_service_masters - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) + with_items: "{{ g_master_hosts | default([]) }}" - name: Evaluate g_service_nodes add_host: @@ -26,7 +28,7 @@ ansible_ssh_user: "{{ 
deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" groups: g_service_nodes - with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]) + with_items: "{{ g_node_hosts | default([]) }}" - include: ../../common/openshift-node/service.yml - include: ../../common/openshift-master/service.yml diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index 4825207c9..ae8275ef6 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -13,8 +13,15 @@ get_url: url: '{{ image_url }}' sha256sum: '{{ image_sha256 }}' - dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}' + register: downloaded_image + +- name: Uncompress Base Cloud image + command: 'unxz -kf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' + args: + creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + when: image_compression in ["xz"] and downloaded_image.changed - name: Create the cloud-init config drive path file: @@ -81,7 +88,7 @@ ansible_ssh_host: '{{ item.1 }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}' + groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}' with_together: - instances - ips diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml index 870bcf2a6..c4ac6a434 100644 --- a/playbooks/libvirt/openshift-cluster/templates/domain.xml +++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml @@ -4,7 +4,6 @@ <metadata xmlns:ansible="https://github.com/ansible/ansible"> <ansible:tags> <ansible:tag>env-{{ cluster }}</ansible:tag> - <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag> <ansible:tag>host-type-{{ type }}</ansible:tag> <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag> </ansible:tags> diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml index 8f00812a9..a6b963608 100644 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ b/playbooks/libvirt/openshift-cluster/terminate.yml @@ -3,6 +3,8 @@ - name: Terminate instance(s) hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml @@ -28,6 +30,8 @@ - name: Terminate instance(s) hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml index d09832c16..5e2bd3a3d 100644 --- a/playbooks/libvirt/openshift-cluster/update.yml +++ b/playbooks/libvirt/openshift-cluster/update.yml @@ -1,7 +1,15 @@ --- - name: Populate oo_hosts_to_update group hosts: localhost + become: no + connection: local gather_facts: no + vars: + g_etcd_hosts: "{{ (groups['tag_host-type-etcd']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}" + g_lb_hosts: "{{ 
(groups['tag_host-type-lb']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}" + g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}" + g_node_hosts: "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}" + vars_files: - vars.yml tasks: @@ -11,9 +19,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])) - | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])) - | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([])) + with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml index c77a0797e..67cfbcdb8 100644 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ b/playbooks/libvirt/openshift-cluster/vars.yml @@ -8,11 +8,13 @@ deployment_vars: origin: image: url: "{{ lookup('oo_option', 'image_url') | - default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2', True) }}" + default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz', True) }}" + compression: "{{ lookup('oo_option', 'image_compression') | + default('xz', True) }}" name: "{{ lookup('oo_option', 'image_name') | default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}" sha256: "{{ lookup('oo_option', 'image_sha256') | - default('e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab', True) }}" + default('9461006300d65172f5668d8875f2aad7b54f7ba4e9c5435d65a84a5a2d66e39b', True) }}" ssh_user: openshift sudo: yes online: diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml index 888804e28..da7b5cc49 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -1,5 +1,7 @@ - hosts: localhost gather_facts: no + become: no + connection: local vars_files: - vars.yml tasks: @@ -9,10 +11,10 @@ - include: ../../common/openshift-cluster/config.yml vars: - g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" - g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" - g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" - g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" + g_etcd_hosts: "{{ (groups['tag_host-type_etcd']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_lb_hosts: "{{ (groups['tag_host-type_lb']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + g_node_hosts: "{{ (groups['tag_host-type_node']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}" openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml index 9dcab3e60..3f24a3e45 100644 --- 
a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml @@ -107,12 +107,6 @@ resources: metadata: env: { get_param: cluster_id } host-type: { get_param: type } - env-host-type: - str_replace: - template: cluster_id-openshift-type - params: - cluster_id: { get_param: cluster_id } - type: { get_param: type } sub-host-type: { get_param: subtype } port: diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index b18512495..876ca595a 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -1,6 +1,7 @@ --- - name: Launch instance(s) hosts: localhost + become: no connection: local gather_facts: no vars_files: @@ -70,7 +71,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_etcd, tag_env-host-type_{{ cluster_id }}-openshift-etcd, tag_sub-host-type_default' + groups: 'tag_env_{{ cluster_id }}, tag_host-type_etcd, tag_sub-host-type_default' with_together: - parsed_outputs.etcd_names - parsed_outputs.etcd_ips @@ -82,7 +83,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_env-host-type_{{ cluster_id }}-openshift-master, tag_sub-host-type_default' + groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_sub-host-type_default' with_together: - parsed_outputs.master_names - parsed_outputs.master_ips @@ -94,7 +95,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_compute' + groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_sub-host-type_compute' with_together: - parsed_outputs.node_names - parsed_outputs.node_ips @@ -106,7 +107,7 @@ ansible_ssh_host: '{{ item[2] }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_infra' + groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_sub-host-type_infra' with_together: - parsed_outputs.infra_names - parsed_outputs.infra_ips diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml index fa194b072..436d3e6f7 100644 --- a/playbooks/openstack/openshift-cluster/list.yml +++ b/playbooks/openstack/openshift-cluster/list.yml @@ -1,6 +1,8 @@ --- - name: Generate oo_list_hosts group hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml @@ -22,6 +24,8 @@ - name: List Hosts hosts: localhost + become: no + connection: local gather_facts: no vars_files: - vars.yml diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml index 62df2be73..2a7f04505 100644 --- a/playbooks/openstack/openshift-cluster/terminate.yml +++ b/playbooks/openstack/openshift-cluster/terminate.yml @@ -1,5 +1,6 @@ - name: Terminate instance(s) hosts: 
localhost + become: no connection: local gather_facts: no vars_files: @@ -25,6 +26,7 @@ default('no', True) | lower in ['no', 'false'] - hosts: localhost + become: no connection: local gather_facts: no vars_files: diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml index e006aa74a..4ecf75a5d 100644 --- a/playbooks/openstack/openshift-cluster/update.yml +++ b/playbooks/openstack/openshift-cluster/update.yml @@ -1,7 +1,15 @@ --- - name: Populate oo_hosts_to_update group hosts: localhost + become: no + connection: local gather_facts: no + vars: + g_etcd_hosts: "{{ (groups['tag_host-type_etcd']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}" + g_lb_hosts: "{{ (groups['tag_host-type_lb']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}" + g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}" + g_node_hosts: "{{ (groups['tag_host-type_node']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}" + vars_files: - vars.yml tasks: @@ -11,9 +19,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])) - | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])) - | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([])) + with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml index f79273824..2a6ac7713 100644 --- a/roles/ansible/tasks/main.yml +++ b/roles/ansible/tasks/main.yml @@ -2,16 +2,7 @@ # Install ansible client - name: Install Ansible - yum: - pkg: ansible - state: installed - when: ansible_pkg_mgr == "yum" - -- name: Install Ansible - dnf: - pkg: ansible - state: installed - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=ansible state=present" - include: config.yml vars: diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml index b7757214d..36fc9b282 100644 --- a/roles/ansible_tower/tasks/main.yaml +++ b/roles/ansible_tower/tasks/main.yaml @@ -1,6 +1,6 @@ --- - name: install some useful packages - yum: name={{ item }} + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" with_items: - git - python-pip diff --git a/roles/ansible_tower_cli/tasks/main.yml b/roles/ansible_tower_cli/tasks/main.yml index 41fac22a0..0c5163b50 100644 --- a/roles/ansible_tower_cli/tasks/main.yml +++ b/roles/ansible_tower_cli/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Install python-ansible-tower-cli - yum: name=python-ansible-tower-cli + action: "{{ ansible_pkg_mgr }} name=python-ansible-tower-cli state=present" - template: src: tower_cli.cfg.j2 diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml index 8410e7c90..b90e7dfd6 100644 --- a/roles/cockpit/tasks/main.yml +++ b/roles/cockpit/tasks/main.yml @@ -1,25 +1,11 @@ --- - name: Install cockpit-ws - yum: - name: "{{ item }}" - state: present + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" with_items: - cockpit-ws - cockpit-shell - cockpit-bridge - "{{ cockpit_plugins }}" - when: ansible_pkg_mgr == "yum" - -- name: Install cockpit-ws - dnf: - name: "{{ item }}" - state: present - 
with_items: - - cockpit-ws - - cockpit-shell - - cockpit-bridge - - "{{ cockpit_plugins }}" - when: ansible_pkg_mgr == "dnf" - name: Enable cockpit-ws service: diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml index f8496199d..4bfd551d3 100644 --- a/roles/copr_cli/tasks/main.yml +++ b/roles/copr_cli/tasks/main.yml @@ -1,10 +1,2 @@ --- -- yum: - name: copr-cli - state: present - when: ansible_pkg_mgr == "yum" - -- dnf: - name: copr-cli - state: present - when: ansible_pkg_mgr == "dnf" +- action: "{{ ansible_pkg_mgr }} name=copr-cli state=present" diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 857674454..e94ebe3e1 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,13 +1,8 @@ --- # tasks file for docker - name: Install docker - yum: pkg=docker - when: ansible_pkg_mgr == "yum" - -- name: Install docker - dnf: pkg=docker - when: ansible_pkg_mgr == "dnf" - + action: "{{ ansible_pkg_mgr }} name=docker state=present" + - name: enable and start the docker service service: name=docker enabled=yes state=started diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index efaab5f31..61892fe06 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -8,12 +8,7 @@ when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4" - name: Install etcd - yum: pkg=etcd-2.* state=present - when: ansible_pkg_mgr == "yum" - -- name: Install etcd - dnf: pkg=etcd* state=present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present" - name: Validate permissions on the config dir file: diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index 86e1bc96e..1e86176ea 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml @@ -1,13 +1,7 @@ --- - name: Install flannel sudo: true - yum: pkg=flannel state=present - when: ansible_pkg_mgr == "yum" - -- name: Install flannel - sudo: true - dnf: pkg=flannel state=present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=flannel state=present" - name: Set flannel etcd url sudo: true diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml index 43c499b4d..65c67fe8d 100644 --- a/roles/fluentd_master/tasks/main.yml +++ b/roles/fluentd_master/tasks/main.yml @@ -1,16 +1,7 @@ --- # TODO: Update fluentd install and configuration when packaging is complete - name: download and install td-agent - yum: - name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' - state: present - when: ansible_pkg_mgr == "yum" - -- name: download and install td-agent - dnf: - name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present" - name: Verify fluentd plugin installed command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes' diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml index 827a1c075..85488b55e 100644 --- a/roles/fluentd_node/tasks/main.yml +++ b/roles/fluentd_node/tasks/main.yml @@ -1,16 +1,7 @@ --- # TODO: Update fluentd install and configuration when packaging is complete - name: download and install td-agent - yum: - name: 
'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' - state: present - when: ansible_pkg_mgr == "yum" - -- name: download and install td-agent - dnf: - name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present" - name: Verify fluentd plugin installed command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes' diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml index 5d015fadd..106ab8489 100644 --- a/roles/haproxy/tasks/main.yml +++ b/roles/haproxy/tasks/main.yml @@ -1,15 +1,6 @@ --- - name: Install haproxy - yum: - pkg: haproxy - state: present - when: ansible_pkg_mgr == "yum" - -- name: Install haproxy - dnf: - pkg: haproxy - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=haproxy state=present" - name: Configure haproxy template: diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml index 3fcb9fd18..2cc35844c 100644 --- a/roles/kube_nfs_volumes/tasks/main.yml +++ b/roles/kube_nfs_volumes/tasks/main.yml @@ -1,11 +1,6 @@ --- - name: Install pyparted (RedHat/Fedora) - yum: name=pyparted,python-httplib2 state=present - when: ansible_pkg_mgr == "yum" - -- name: Install pyparted (RedHat/Fedora) - dnf: name=pyparted,python-httplib2 state=present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=pyparted,python-httplib2 state=present" - name: partition the drives partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }} diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml index a58a7b824..eaec28544 100644 --- a/roles/kube_nfs_volumes/tasks/nfs.yml +++ b/roles/kube_nfs_volumes/tasks/nfs.yml @@ -1,11 +1,6 @@ --- -- name: Install NFS server on Fedora/Red Hat - yum: name=nfs-utils state=present - when: ansible_pkg_mgr == "yum" - -- name: Install NFS server on Fedora/Red Hat - dnf: name=nfs-utils state=present - when: ansible_pkg_mgr == "dnf" +- name: Install NFS server + action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present" - name: Start rpcbind on Fedora/Red Hat service: name=rpcbind state=started enabled=yes diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py index 8bb586c0b..c08bef4f7 100644 --- a/roles/lib_zabbix/library/zbx_action.py +++ b/roles/lib_zabbix/library/zbx_action.py @@ -30,6 +30,17 @@ # pylint: disable=import-error from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection, ZabbixAPIError +CUSTOM_SCRIPT_ACTION = '0' +IPMI_ACTION = '1' +SSH_ACTION = '2' +TELNET_ACTION = '3' +GLOBAL_SCRIPT_ACTION = '4' + +EXECUTE_ON_ZABBIX_AGENT = '0' +EXECUTE_ON_ZABBIX_SERVER = '1' + +OPERATION_REMOTE_COMMAND = '1' + def exists(content, key='result'): ''' Check if key exists in content or the size of content[key] > 0 ''' @@ -70,6 +81,40 @@ def filter_differences(zabbix_filters, user_filters): return rval +def host_in_zabbix(zab_hosts, usr_host): + ''' Check whether a particular user host is already in the + Zabbix list of hosts ''' + + for usr_hst_key, usr_hst_val in usr_host.items(): + for zab_host in zab_hosts: + if usr_hst_key in zab_host and \ + zab_host[usr_hst_key] == str(usr_hst_val): + return True + + return False + +def hostlist_in_zabbix(zab_hosts, usr_hosts): + ''' Check whether user-provided list of hosts are 
already in + the Zabbix action ''' + + if len(zab_hosts) != len(usr_hosts): + return False + + for usr_host in usr_hosts: + if not host_in_zabbix(zab_hosts, usr_host): + return False + + return True + +def opcommand_diff(zab_op_cmd, usr_op_cmd): + ''' Check whether user-provided opcommand matches what's already + stored in Zabbix ''' + + for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items(): + if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val): + return True + return False + # This logic is quite complex. We are comparing two lists of dictionaries. # The outer for-loops allow us to descend down into both lists at the same time # and then walk over the key,val pairs of the incoming user dict's changes @@ -116,6 +161,18 @@ def operation_differences(zabbix_ops, user_ops): if usr_ids != zab_usr_ids: rval[key] = val + elif key == 'opcommand': + if opcommand_diff(zab[key], val): + rval[key] = val + break + + # opcommand_grp can be treated just like opcommand_hst + # as opcommand_grp[] is just a list of groups + elif key == 'opcommand_hst' or key == 'opcommand_grp': + if not hostlist_in_zabbix(zab[key], val): + rval[key] = val + break + elif zab[key] != str(val): rval[key] = val return rval @@ -288,7 +345,7 @@ def get_condition_type(event_source, inc_condition): def get_operation_type(inc_operation): ''' determine the correct operation type''' o_types = {'send message': 0, - 'remote command': 1, + 'remote command': OPERATION_REMOTE_COMMAND, 'add host': 2, 'remove host': 3, 'add to host group': 4, @@ -301,7 +358,64 @@ def get_operation_type(inc_operation): return o_types[inc_operation] -def get_action_operations(zapi, inc_operations): +def get_opcommand_type(opcommand_type): + ''' determine the opcommand type ''' + oc_types = {'custom script': CUSTOM_SCRIPT_ACTION, + 'IPMI': IPMI_ACTION, + 'SSH': SSH_ACTION, + 'Telnet': TELNET_ACTION, + 'global script': GLOBAL_SCRIPT_ACTION, + } + + return oc_types[opcommand_type] + +def get_execute_on(execute_on): + ''' determine the execution target ''' + e_types = {'zabbix agent': EXECUTE_ON_ZABBIX_AGENT, + 'zabbix server': EXECUTE_ON_ZABBIX_SERVER, + } + + return e_types[execute_on] + +def action_remote_command(ansible_module, zapi, operation): + ''' Process remote command type of actions ''' + + if 'type' not in operation['opcommand']: + ansible_module.exit_json(failed=True, changed=False, state='unknown', + results="No Operation Type provided") + + operation['opcommand']['type'] = get_opcommand_type(operation['opcommand']['type']) + + if operation['opcommand']['type'] == CUSTOM_SCRIPT_ACTION: + + if 'execute_on' in operation['opcommand']: + operation['opcommand']['execute_on'] = get_execute_on(operation['opcommand']['execute_on']) + + # custom script still requires the target hosts/groups to be set + operation['opcommand_hst'] = [] + operation['opcommand_grp'] = [] + for usr_host in operation['target_hosts']: + if usr_host['target_type'] == 'zabbix server': + # 0 = target host local/current host + operation['opcommand_hst'].append({'hostid': 0}) + elif usr_host['target_type'] == 'group': + group_name = usr_host['target'] + gid = get_host_group_id_by_name(zapi, group_name) + operation['opcommand_grp'].append({'groupid': gid}) + elif usr_host['target_type'] == 'host': + host_name = usr_host['target'] + hid = get_host_id_by_name(zapi, host_name) + operation['opcommand_hst'].append({'hostid': hid}) + + # 'target_hosts' is just to make it easier to build zbx_actions + # not part of ZabbixAPI + del operation['target_hosts'] + else: + 
ansible_module.exit_json(failed=True, changed=False, state='unknown', + results="Unsupported remote command type") + + +def get_action_operations(ansible_module, zapi, inc_operations): '''Convert the operations into syntax for api''' for operation in inc_operations: operation['operationtype'] = get_operation_type(operation['operationtype']) @@ -315,9 +429,8 @@ def get_action_operations(zapi, inc_operations): else: operation['opmessage']['default_msg'] = 0 - # NOT supported for remote commands - elif operation['operationtype'] == 1: - continue + elif operation['operationtype'] == OPERATION_REMOTE_COMMAND: + action_remote_command(ansible_module, zapi, operation) # Handle Operation conditions: # Currently there is only 1 available which @@ -464,7 +577,8 @@ def main(): if state == 'present': conditions = get_action_conditions(zapi, module.params['event_source'], module.params['conditions_filter']) - operations = get_action_operations(zapi, module.params['operations']) + operations = get_action_operations(module, zapi, + module.params['operations']) params = {'name': module.params['name'], 'esc_period': module.params['escalation_time'], 'eventsource': get_event_source(module.params['event_source']), diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml index fda23e05e..0a0cf1fae 100644 --- a/roles/nickhammond.logrotate/tasks/main.yml +++ b/roles/nickhammond.logrotate/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: nickhammond.logrotate | Install logrotate - action: "{{ansible_pkg_mgr}} pkg=logrotate state=present" + action: "{{ ansible_pkg_mgr }} name=logrotate state=present" - name: nickhammond.logrotate | Setup logrotate.d scripts template: diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml index 2b99f8bcd..4a0968686 100644 --- a/roles/openshift_ansible_inventory/tasks/main.yml +++ b/roles/openshift_ansible_inventory/tasks/main.yml @@ -1,17 +1,5 @@ --- -- yum: - name: "{{ item }}" - state: present - when: ansible_pkg_mgr == "yum" - with_items: - - openshift-ansible-inventory - - openshift-ansible-inventory-aws - - openshift-ansible-inventory-gce - -- dnf: - name: "{{ item }}" - state: present - when: ansible_pkg_mgr == "dnf" +- action: "{{ ansible_pkg_mgr }} name={{ item}} state=present" with_items: - openshift-ansible-inventory - openshift-ansible-inventory-aws diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index c0982290d..c34f42838 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -14,6 +14,7 @@ cluster_id: "{{ openshift_cluster_id | default('default') }}" debug_level: "{{ openshift_debug_level | default(2) }}" hostname: "{{ openshift_hostname | default(None) }}" + install_examples: "{{ openshift_install_examples | default(True) }}" ip: "{{ openshift_ip | default(None) }}" public_hostname: "{{ openshift_public_hostname | default(None) }}" public_ip: "{{ openshift_public_ip | default(None) }}" @@ -24,6 +25,12 @@ use_flannel: "{{ openshift_use_flannel | default(None) }}" use_manageiq: "{{ openshift_use_manageiq | default(None) }}" +- name: Install the base package for versioning + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') }} state=present" + +- name: Set version facts + openshift_facts: + # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the # hostname by default. 
- set_fact: diff --git a/roles/openshift_examples/README.md b/roles/openshift_examples/README.md index 7d8735a0a..6ddbe7017 100644 --- a/roles/openshift_examples/README.md +++ b/roles/openshift_examples/README.md @@ -11,6 +11,13 @@ ansible. Requirements ------------ +Facts +----- + +| Name | Default Value | Description | +-----------------------------|---------------|----------------------------------------| +| openshift_install_examples | true | Runs the role with the below variables | + Role Variables -------------- @@ -32,7 +39,7 @@ Example Playbook TODO ---- Currently we use `oc create -f` against various files and we accept non zero return code as a success -if (and only iff) stderr also contains the string 'already exists'. This means that if one object in the file exists already +if (and only if) stderr also contains the string 'already exists'. This means that if one object in the file exists already but others fail to create you won't be aware of the failure. This also means that we do not currently support updating existing objects. diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml index 42e7903fd..84d859553 100644 --- a/roles/openshift_expand_partition/tasks/main.yml +++ b/roles/openshift_expand_partition/tasks/main.yml @@ -1,11 +1,6 @@ --- - name: Ensure growpart is installed - yum: pkg=cloud-utils-growpart state=present - when: ansible_pkg_mgr == "yum" - -- name: Ensure growpart is installed - dnf: pkg=cloud-utils-growpart state=present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=cloud-utils-growpart state=present" - name: Grow the partitions command: "growpart {{oep_drive}} {{oep_partition}}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 8b3402729..e557853b1 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1057,6 +1057,7 @@ class OpenShiftFacts(object): common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc' common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm' common['dns_domain'] = 'cluster.local' + common['install_examples'] = True defaults['common'] = common if 'master' in roles: diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index 2e889d7d5..832f7ad84 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -7,16 +7,7 @@ - ansible_version | version_compare('1.9.0.1', 'ne') - name: Ensure PyYaml is installed - yum: pkg={{ item }} state=installed - when: ansible_pkg_mgr == "yum" - with_items: - - PyYAML - -- name: Ensure PyYaml is installed - dnf: pkg={{ item }} state=installed - when: ansible_pkg_mgr == "dnf" - with_items: - - PyYAML + action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" - name: Gather Cluster facts openshift_facts: diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 011b5dedd..8995863ec 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -78,14 +78,7 @@ controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}" - name: Install Master package - yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present - when: ansible_pkg_mgr == "yum" - register: install_result - -- name: Install Master package - dnf: pkg={{ openshift.common.service_type }}-master{{ openshift_version 
}} state=present - when: ansible_pkg_mgr == "dnf" - register: install_result + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=present" - name: Re-gather package dependent master facts openshift_facts: @@ -117,13 +110,8 @@ - restart master controllers - name: Install httpd-tools if needed - yum: pkg=httpd-tools state=present - when: (ansible_pkg_mgr == "yum") and (item.kind == 'HTPasswdPasswordIdentityProvider') - with_items: openshift.master.identity_providers - -- name: Install httpd-tools if needed - dnf: pkg=httpd-tools state=present - when: (ansible_pkg_mgr == "dnf") and (item.kind == 'HTPasswdPasswordIdentityProvider') + action: "{{ ansible_pkg_mgr }} name=httpd-tools state=present" + when: (item.kind == 'HTPasswdPasswordIdentityProvider') with_items: openshift.master.identity_providers - name: Ensure htpasswd directory exists @@ -147,13 +135,11 @@ template: src: atomic-openshift-master-api.service.j2 dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service - force: no when: openshift_master_ha | bool and openshift_master_cluster_method == "native" - name: Create the controllers service file template: src: atomic-openshift-master-controllers.service.j2 dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service - force: no when: openshift_master_ha | bool and openshift_master_cluster_method == "native" - name: Create the api env file template: @@ -254,20 +240,17 @@ master_api_service_status_changed = start_result | changed when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' -# TODO: fix the ugly workaround of setting ignore_errors -# the controllers service tries to start even if it is already started - name: Start and enable master controller service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' register: start_result - ignore_errors: yes - set_fact: master_controllers_service_status_changed = start_result | changed when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' - name: Install cluster packages - action: "{{ansible_pkg_mgr}} pkg=pcs state=present" + action: "{{ ansible_pkg_mgr }} name=pcs state=present" when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' register: install_result diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 index 8952c86ef..ef0b57ef4 100644 --- a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 @@ -7,7 +7,7 @@ Before={{ openshift.common.service_type }}-node.service Requires=network.target [Service] -Type=notify +Type=simple EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers Environment=GOTRACEBACK=crash ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index caac13be3..0db95a4eb 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -1,13 +1,6 @@ --- - name: Install the base package for admin tooling - yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} 
state=present - when: ansible_pkg_mgr == "yum" - register: install_result - -- name: Install the base package for admin tooling - dnf: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present - when: ansible_pkg_mgr == "dnf" - register: install_result + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version }} state=present" - name: Reload generated facts openshift_facts: diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 110556b4a..eef7bec9a 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -37,24 +37,11 @@ # We have to add tuned-profiles in the same transaction otherwise we run into depsolving # problems because the rpms don't pin the version properly. - name: Install Node package - yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present - when: ansible_pkg_mgr == "yum" - register: node_install_result - -- name: Install Node package - dnf: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present - when: ansible_pkg_mgr == "dnf" - register: node_install_result - -- name: Install sdn-ovs package - yum: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present - register: sdn_install_result - when: ansible_pkg_mgr == "yum" and openshift.common.use_openshift_sdn + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present" - name: Install sdn-ovs package - dnf: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present - register: sdn_install_result - when: ansible_pkg_mgr == "dnf" and openshift.common.use_openshift_sdn + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present" + when: openshift.common.use_openshift_sdn # TODO: add the validate parameter when there is a validation command to run - name: Create the Node config diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml index b5146dcac..10d0990a0 100644 --- a/roles/openshift_node/tasks/storage_plugins/ceph.yml +++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml @@ -1,12 +1,3 @@ --- - name: Install Ceph storage plugin dependencies - yum: - pkg: ceph-common - state: installed - when: ansible_pkg_mgr == "yum" - -- name: Install Ceph storage plugin dependencies - dnf: - pkg: ceph-common - state: installed - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
\ No newline at end of file diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml index a357023e1..1080646ee 100644 --- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml @@ -1,15 +1,6 @@ --- - name: Install GlusterFS storage plugin dependencies - yum: - pkg: glusterfs-fuse - state: installed - when: ansible_pkg_mgr == "yum" - -- name: Install GlusterFS storage plugin dependencies - dnf: - pkg: glusterfs-fuse - state: installed - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present" - name: Set sebooleans to allow gluster storage plugin access from containers seboolean: diff --git a/roles/openshift_repos/handlers/main.yml b/roles/openshift_repos/handlers/main.yml index fed4ab2f0..198fc7d6e 100644 --- a/roles/openshift_repos/handlers/main.yml +++ b/roles/openshift_repos/handlers/main.yml @@ -1,6 +1,3 @@ --- -- name: refresh yum cache - command: yum clean all - -- name: refresh dnf cache - command: dnf clean all +- name: refresh cache + command: "{{ ansible_pkg_mgr }} clean all" diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index c55b5df89..9faf0dfd9 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -11,30 +11,21 @@ that: openshift.common.deployment_type in known_openshift_deployment_types - name: Ensure libselinux-python is installed - yum: - pkg: libselinux-python - state: present - when: ansible_pkg_mgr == "yum" - -- name: Ensure libselinux-python is installed - dnf: - pkg: libselinux-python - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=libselinux-python state=present" - name: Create any additional repos that are defined template: src: yum_repo.j2 dest: /etc/yum.repos.d/openshift_additional.repo when: openshift_additional_repos | length > 0 - notify: refresh yum cache + notify: refresh cache - name: Remove the additional repos if no longer defined file: dest: /etc/yum.repos.d/openshift_additional.repo state: absent when: openshift_additional_repos | length == 0 - notify: refresh yum cache + notify: refresh cache - name: Remove any yum repo files for other deployment types RHEL/CentOS file: @@ -44,7 +35,7 @@ - '*/repos/*' when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) and (ansible_os_family == "RedHat" and ansible_distribution != "Fedora") - notify: refresh yum cache + notify: refresh cache - name: Remove any yum repo files for other deployment types Fedora file: @@ -54,24 +45,24 @@ - '*/repos/*' when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and (ansible_distribution == "Fedora") - notify: refresh dnf cache + notify: refresh cache - name: Configure gpg keys if needed copy: src={{ item }} dest=/etc/pki/rpm-gpg/ with_fileglob: - "{{ openshift_deployment_type }}/gpg_keys/*" - notify: refresh yum cache + notify: refresh cache - name: Configure yum repositories RHEL/CentOS copy: src={{ item }} dest=/etc/yum.repos.d/ with_fileglob: - "{{ openshift_deployment_type }}/repos/*" - notify: refresh yum cache + notify: refresh cache when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora") - name: Configure yum repositories Fedora copy: src={{ item }} dest=/etc/yum.repos.d/ with_fileglob: - "fedora-{{ openshift_deployment_type }}/repos/*" - notify: refresh dnf cache + notify: refresh cache when: 
(ansible_distribution == "Fedora") diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml index bf23dfe98..cf1ba6f25 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml @@ -1,12 +1,7 @@ --- - name: Install NFS server - yum: name=nfs-utils state=present - when: ansible_pkg_mgr == "yum" - -- name: Install NFS server - dnf: name=nfs-utils state=present - when: ansible_pkg_mgr == "dnf" - + action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present" + - name: Start rpcbind service: name=rpcbind state=started enabled=yes diff --git a/roles/os_env_extras/tasks/main.yaml b/roles/os_env_extras/tasks/main.yaml index 29599559c..cbf5c37f5 100644 --- a/roles/os_env_extras/tasks/main.yaml +++ b/roles/os_env_extras/tasks/main.yaml @@ -12,13 +12,4 @@ dest: /root/.vimrc - name: Bash Completion - yum: - pkg: bash-completion - state: installed - when: ansible_pkg_mgr == "yum" - -- name: Bash Completion - dnf: - pkg: bash-completion - state: installed - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=bash-completion state=present"
\ No newline at end of file diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml index cf2a2c733..ba12c6b0c 100644 --- a/roles/os_firewall/tasks/firewall/firewalld.yml +++ b/roles/os_firewall/tasks/firewall/firewalld.yml @@ -1,16 +1,6 @@ --- - name: Install firewalld packages - yum: - name: firewalld - state: present - when: ansible_pkg_mgr == "yum" - register: install_result - -- name: Install firewalld packages - dnf: - name: firewalld - state: present - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=firewalld state=present" register: install_result - name: Check if iptables-services is installed diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 36d51504c..d3a5b1fa7 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -1,22 +1,9 @@ --- - name: Install iptables packages - yum: - name: "{{ item }}" - state: present + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" with_items: - iptables - iptables-services - when: ansible_pkg_mgr == "yum" - register: install_result - -- name: Install iptables packages - dnf: - name: "{{ item }}" - state: present - with_items: - - iptables - - iptables-services - when: ansible_pkg_mgr == "dnf" register: install_result - name: Check if firewalld is installed diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml index 40eec8d35..2532059c0 100644 --- a/roles/os_update_latest/tasks/main.yml +++ b/roles/os_update_latest/tasks/main.yml @@ -1,8 +1,3 @@ --- - name: Update all packages - yum: name=* state=latest - when: ansible_pkg_mgr == "yum" - -- name: Update all packages - dnf: name=* state=latest - when: ansible_pkg_mgr == "dnf" + action: "{{ ansible_pkg_mgr }} name=* state=latest"
\ No newline at end of file diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml index d0b307a3d..7552086d4 100644 --- a/roles/os_zabbix/tasks/main.yml +++ b/roles/os_zabbix/tasks/main.yml @@ -37,6 +37,9 @@ - include_vars: template_aws.yml tags: - aws +- include_vars: template_zagg_server.yml + tags: + - zagg_server - name: Include Template Heartbeat include: ../../lib_zabbix/tasks/create_template.yml @@ -137,3 +140,13 @@ password: "{{ ozb_password }}" tags: - aws + +- name: Include Template Zagg Server + include: ../../lib_zabbix/tasks/create_template.yml + vars: + template: "{{ g_template_zagg_server }}" + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" + tags: + - zagg_server diff --git a/roles/os_zabbix/vars/template_zagg_server.yml b/roles/os_zabbix/vars/template_zagg_server.yml new file mode 100644 index 000000000..0e8e53bb7 --- /dev/null +++ b/roles/os_zabbix/vars/template_zagg_server.yml @@ -0,0 +1,36 @@ +--- +g_template_zagg_server: + name: Template Zagg Server + zitems: + - key: zagg.server.metrics.count + applications: + - Zagg Server + value_type: int + + - key: zagg.server.processor.errors + applications: + - Zagg Server + value_type: int + + - key: zagg.server.heartbeat.count + applications: + - Zagg Server + value_type: int + + ztriggers: + - name: 'Error sending metrics on {HOST.NAME}' + expression: '{Template Zagg Server:zagg.server.processor.errors.min(#3)}>0' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc' + priority: average + + - name: 'Critically High number of metrics in Zagg queue {HOST.NAME}' + expression: '{Template Zagg Server:zagg.server.metrics.count.min(#3)}>10000' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc' + priority: high + + - name: 'High number of metrics in Zagg queue {HOST.NAME}' + expression: '{Template Zagg Server:zagg.server.metrics.count.min(#3)}>5000' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc' + dependencies: + - 'Critically High number of metrics in Zagg queue {HOST.NAME}' + priority: average diff --git a/roles/tito/tasks/main.yml b/roles/tito/tasks/main.yml index f7b4ef363..3cf9e2bfd 100644 --- a/roles/tito/tasks/main.yml +++ b/roles/tito/tasks/main.yml @@ -1,4 +1,2 @@ --- -- yum: - name: tito - state: present +- action: "{{ ansible_pkg_mgr }} name=tito state=present" |