Diffstat (limited to 'playbooks/adhoc')
 -rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml                          |  29
 -rw-r--r--  playbooks/adhoc/bootstrap-fedora.yml                                         |   5
 -rw-r--r--  playbooks/adhoc/create_pv/create_pv.yaml                                     | 160
 -rw-r--r--  playbooks/adhoc/create_pv/pv-template.j2                                     |  16
 -rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup                  |   2
 -rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml     | 142
 -rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 115
 -rw-r--r--  playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml            |  69
 -rw-r--r--  playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py                  |  41
 -rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml                            | 206
 -rw-r--r--  playbooks/adhoc/noc/create_host.yml                                          |   4
 -rw-r--r--  playbooks/adhoc/noc/create_maintenance.yml                                   |   2
 l---------  playbooks/adhoc/noc/filter_plugins                                           |   1
 -rw-r--r--  playbooks/adhoc/noc/get_zabbix_problems.yml                                  |   4
 -rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml                            |   6
 -rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.j2                                   |  23
 -rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.yml                                  |  78
 -rwxr-xr-x  playbooks/adhoc/sdn_restart/oo-sdn-restart.yml                               |  52
 -rw-r--r--  playbooks/adhoc/setupnfs.yml                                                 |  21
 -rw-r--r--  playbooks/adhoc/uninstall.yml                                                | 406
 -rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml                                |  60
 l---------  playbooks/adhoc/zabbix_setup/filter_plugins                                  |   1
 -rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml                               |   7
 -rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-config-zaio.yml                              |  19
 l---------  playbooks/adhoc/zabbix_setup/roles (renamed from playbooks/adhoc/noc/roles)  |   0
 25 files changed, 1467 insertions(+), 2 deletions(-)
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
new file mode 100644
index 000000000..5a5a00ea4
--- /dev/null
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -0,0 +1,29 @@
+# This deletes *ALL* Docker images, and uninstalls OpenShift and
+# Atomic Enterprise RPMs. It is primarily intended for use
+# with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
+
+- hosts:
+ - OSEv3:children
+
+ become: yes
+
+ tasks:
+ - shell: docker ps -a -q | xargs docker stop
+ changed_when: False
+ failed_when: False
+
+ - shell: docker ps -a -q| xargs docker rm
+ changed_when: False
+ failed_when: False
+
+ - shell: docker images -q |xargs docker rmi
+ changed_when: False
+ failed_when: False
+
+ - user: name={{ item }} state=absent remove=yes
+ with_items:
+ - alice
+ - joe
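Note: the stop/rm/rmi pipelines above error out when nothing matches, which is why every task sets failed_when: False. A minimal alternative sketch, assuming GNU xargs, uses --no-run-if-empty (the same guard docker_storage_cleanup.yml later in this patch relies on) so an already-clean host produces no error in the first place:

  - shell: docker ps -a -q | xargs --no-run-if-empty docker stop
    changed_when: False
    failed_when: False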
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
new file mode 100644
index 000000000..b370d7fba
--- /dev/null
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -0,0 +1,5 @@
+- hosts: OSEv3
+ gather_facts: false
+ tasks:
+ - name: install python and deps for ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python python2-firewall pyOpenSSL python-cryptography
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
new file mode 100644
index 000000000..81c1ee653
--- /dev/null
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -0,0 +1,160 @@
+---
+#example run:
+# ansible-playbook -e "cli_volume_size=1" \
+# -e "cli_device_name=/dev/xvdf" \
+# -e "cli_hosttype=master" \
+# -e "cli_clusterid=ops" \
+# create_pv.yaml
+#
+- name: Create a volume and attach it to master
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ vars:
+ cli_volume_type: gp2
+ cli_volume_iops: ''
+ oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
+ intersect(groups['oo_clusterid_' ~ cli_clusterid]) |
+ first }}"
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_volume_size
+ - cli_device_name
+ - cli_hosttype
+ - cli_clusterid
+
+ - name: set oo_name fact
+ set_fact:
+ oo_name: "{{ oo_name }}"
+
+
+ - name: Select a single master to run this on
+ add_host:
+ hostname: "{{ oo_name }}"
+ ansible_ssh_host: "{{ hostvars[oo_name].ec2_public_dns_name }}"
+ groups: oo_master
+
+ - name: Create a volume and attach it
+ ec2_vol:
+ state: present
+ instance: "{{ hostvars[oo_name]['ec2_id'] }}"
+ region: "{{ hostvars[oo_name]['ec2_region'] }}"
+ volume_size: "{{ cli_volume_size }}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: "{{ cli_device_name }}"
+ iops: "{{ cli_volume_iops }}"
+ register: vol
+
+ - debug: var=vol
+
+ - name: tag the vol with a name
+ ec2_tag: region={{ hostvars[oo_name]['ec2_region'] }} resource={{vol.volume_id}}
+ args:
+ tags:
+ Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
+ clusterid: "{{cli_clusterid}}"
+ register: voltags
+
+ - debug: var=voltags
+
+- name: Configure the drive
+ gather_facts: no
+ hosts: oo_master
+ user: root
+ connection: ssh
+ vars:
+ pv_tmpdir: /tmp/persistentvolumes
+
+ post_tasks:
+ - name: Setting facts for template
+ set_fact:
+ pv_name: "pv-{{cli_volume_size}}-{{ hostvars[hostvars.localhost.oo_name]['ec2_tag_Name'] }}-{{hostvars.localhost.vol.volume_id }}"
+ vol_az: "{{ hostvars[hostvars.localhost.oo_name]['ec2_placement'] }}"
+ vol_id: "{{ hostvars.localhost.vol.volume_id }}"
+ vol_size: "{{ cli_volume_size }}"
+ pv_mntdir: "{{ pv_tmpdir }}/mnt-{{ 1000 | random }}"
+
+ - set_fact:
+ pv_template: "{{ pv_tmpdir }}/{{ pv_name }}.yaml"
+
+ - name: "Mkdir {{ pv_tmpdir }}"
+ file:
+ state: directory
+ path: "{{ pv_tmpdir }}"
+ mode: '0750'
+
+ - name: "Mkdir {{ pv_mntdir }}"
+ file:
+ state: directory
+ path: "{{ pv_mntdir }}"
+ mode: '0750'
+
+ - name: Create pv file from template
+ template:
+ src: ./pv-template.j2
+ dest: "{{ pv_template }}"
+ owner: root
+ mode: '0640'
+
+ - name: mkfs
+ filesystem:
+ dev: "{{ cli_device_name }}"
+ fstype: ext4
+
+ - name: Mount the dev
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ fstype: ext4
+ state: mounted
+
+ - name: chgrp g+rwXs
+ file:
+ path: "{{ pv_mntdir }}"
+ mode: 'g+rwXs'
+ recurse: yes
+ seuser: system_u
+ serole: object_r
+ setype: svirt_sandbox_file_t
+ selevel: s0
+
+ - name: umount
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ state: unmounted
+ fstype: ext4
+
+ - name: remove from fstab
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ state: absent
+ fstype: ext4
+
+ - name: detach drive
+ delegate_to: localhost
+ ec2_vol:
+ region: "{{ hostvars[hostvars.localhost.oo_name].ec2_region }}"
+ id: "{{ hostvars.localhost.vol.volume_id }}"
+ instance: None
+
+ - name: "Remove {{ pv_mntdir }}"
+ file:
+ state: absent
+ path: "{{ pv_mntdir }}"
+
+ # We have to use the shell module because we can't set env vars with the command module.
+ - name: "Place PV into oc"
+ shell: "KUBECONFIG=/etc/origin/master/admin.kubeconfig oc create -f {{ pv_template | quote }}"
+ register: oc_output
+
+ - debug: var=oc_output
+
+ - fail:
+ msg: "Failed to add {{ pv_template }} to master."
+ when: oc_output.rc != 0
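A verification-only sketch that could follow the oc create step above, reusing the same admin kubeconfig and the generated pv_name fact, to confirm the PV object was actually registered:

  - name: Verify the PV exists
    shell: "KUBECONFIG=/etc/origin/master/admin.kubeconfig oc get pv {{ pv_name }}"
    changed_when: False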
diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2
new file mode 100644
index 000000000..df082614b
--- /dev/null
+++ b/playbooks/adhoc/create_pv/pv-template.j2
@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{ pv_name }}
+ labels:
+ type: ebs
+spec:
+ capacity:
+ storage: {{ vol_size }}Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ awsElasticBlockStore:
+ volumeID: aws://{{ vol_az }}/{{ vol_id }}
+ fsType: ext4
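For reference, rendered with hypothetical values (cli_volume_size=1, an instance tagged ops-master-abcde in availability zone us-east-1d, volume vol-0a1b2c3d), the template above produces a PV like:

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-1-ops-master-abcde-vol-0a1b2c3d
  labels:
    type: ebs
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  awsElasticBlockStore:
    volumeID: aws://us-east-1d/vol-0a1b2c3d
    fsType: ext4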
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup
new file mode 100644
index 000000000..059058823
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup
@@ -0,0 +1,2 @@
+DEVS=/dev/xvdb
+VG=docker_vg
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
new file mode 100644
index 000000000..4d32fc40b
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -0,0 +1,142 @@
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker)
+# in AWS. It adds an additional EBS volume and creates the volume group docker will use on that volume.
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+# export AWS_ACCESS_KEY_ID='XXXXX'
+# export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+# ansible-playbook -e 'cli_tag_name=<tag-name>' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+# Example:
+# ansible-playbook -e 'cli_tag_name=ops-master-12345' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+# Notes:
+# * By default this will do a 30GB volume.
+# * iops are calculated as disk size * 30, e.g. 30GB * 30 = 900 iops
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+#
+
+- name: Fix docker to have a provisioned iops drive
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ vars:
+ cli_volume_type: gp2
+ cli_volume_size: 30
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+ - cli_volume_size
+
+ - debug:
+ var: hosts
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if loopback
+ shell: docker info | grep 'Data file:.*loop'
+ register: loop_device_check
+ ignore_errors: yes
+
+ - debug:
+ var: loop_device_check
+
+ - name: fail if we don't detect loopback
+ fail:
+ msg: loopback not detected! Please investigate manually.
+ when: loop_device_check.rc == 1
+
+ - name: stop zagg client monitoring container
+ service:
+ name: oso-rhel7-zagg-client
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop pcp client monitoring container
+ service:
+ name: oso-f22-host-monitoring
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop docker
+ service:
+ name: docker
+ state: stopped
+
+ - name: delete /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: remove /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: check to see if /dev/xvdb exists
+ command: test -e /dev/xvdb
+ register: xvdb_check
+ ignore_errors: yes
+
+ - debug: var=xvdb_check
+
+ - name: fail if /dev/xvdb already exists
+ fail:
+ msg: /dev/xvdb already exists. Please investigate
+ when: xvdb_check.rc == 0
+
+ - name: Create a volume and attach it
+ delegate_to: localhost
+ ec2_vol:
+ state: present
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ volume_size: "{{ cli_volume_size | default(30, True)}}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: /dev/xvdb
+ register: vol
+
+ - debug: var=vol
+
+ - name: tag the vol with a name
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{ vol.volume_id }}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }}"
+ clusterid: "{{ ec2_tag_clusterid }}"
+ register: voltags
+
+ - name: Wait for volume to attach
+ pause:
+ seconds: 30
+
+ - name: copy the docker-storage-setup config file
+ copy:
+ src: docker-storage-setup
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+
+ - name: docker storage setup
+ command: docker-storage-setup
+ register: setup_output
+
+ - debug: var=setup_output
+
+ - name: start docker
+ command: systemctl start docker.service
+ register: dockerstart
+
+ - debug: var=dockerstart
+
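If a provisioned-iops volume is wanted instead of gp2, the "disk size * 30" rule from the notes could be wired into the ec2_vol task the same way create_pv.yaml passes iops. A hedged sketch, not what this play does as written:

  - name: Create a provisioned-iops volume and attach it
    delegate_to: localhost
    ec2_vol:
      state: present
      instance: "{{ ec2_id }}"
      region: "{{ ec2_region }}"
      volume_size: "{{ cli_volume_size }}"
      volume_type: io1
      iops: "{{ 30 * cli_volume_size }}"   # 30GB * 30 = 900 iops
      device_name: /dev/xvdb
    register: vol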
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..1438fd7d5
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,115 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+# To run:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+# Example:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+# Notes:
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+ hosts: "{{ cli_host }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_docker_device
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if loopback
+ shell: docker info | grep 'Data file:.*loop'
+ register: loop_device_check
+ ignore_errors: yes
+
+ - debug:
+ var: loop_device_check
+
+ - name: fail if we don't detect loopback
+ fail:
+ msg: loopback not detected! Please investigate manually.
+ when: loop_device_check.rc == 1
+
+ - name: stop zagg client monitoring container
+ service:
+ name: oso-rhel7-zagg-client
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop pcp client monitoring container
+ service:
+ name: oso-f22-host-monitoring
+ state: stopped
+ ignore_errors: yes
+
+ - name: "check to see if {{ cli_docker_device }} exists"
+ command: "test -e {{ cli_docker_device }}"
+ register: docker_dev_check
+ ignore_errors: yes
+
+ - debug: var=docker_dev_check
+
+ - name: "fail if {{ cli_docker_device }} doesn't exist"
+ fail:
+ msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+ when: docker_dev_check.rc != 0
+
+ - name: stop docker
+ service:
+ name: docker
+ state: stopped
+
+ - name: delete /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: remove /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: copy the docker-storage-setup config file
+ copy:
+ content: >
+ DEVS={{ cli_docker_device }}
+ VG=docker_vg
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+
+ - name: docker storage setup
+ command: docker-storage-setup
+ register: setup_output
+
+ - debug: var=setup_output
+
+ - name: extend the vg
+ command: lvextend -l 90%VG /dev/docker_vg/docker-pool
+ register: extend_output
+
+ - debug: var=extend_output
+
+ - name: start docker
+ service:
+ name: docker
+ state: restarted
+
+ - name: docker info
+ command: docker info
+ register: dockerinfo
+
+ - debug: var=dockerinfo
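A verification-only sketch (assuming the docker_vg/docker-pool names written by the docker-storage-setup config above) that could follow the lvextend step to confirm the pool really grew:

  - name: show docker-pool size
    command: lvs /dev/docker_vg/docker-pool
    register: lvs_output
    changed_when: False

  - debug: var=lvs_output.stdout_lines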
diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
new file mode 100644
index 000000000..b6dde357e
--- /dev/null
+++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
@@ -0,0 +1,69 @@
+---
+# This playbook attempts to clean up unwanted docker files to help alleviate docker disk space issues.
+#
+# To run:
+#
+# 1. run the playbook:
+#
+# ansible-playbook -e 'cli_tag_name=<tag-name>' docker_storage_cleanup.yml
+#
+# Example:
+#
+# ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' docker_storage_cleanup.yml
+#
+# Notes:
+# * This *should* not interfere with running docker images
+#
+
+- name: Clean up Docker Storage
+ gather_facts: no
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+
+ pre_tasks:
+
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+
+ - name: Ensure docker is running
+ service:
+ name: docker
+ state: started
+ enabled: yes
+
+ - name: Get docker info
+ command: docker info
+ register: docker_info
+
+ - name: Show docker info
+ debug:
+ var: docker_info.stdout_lines
+
+ - name: Remove exited and dead containers
+ shell: "docker ps -a | awk '/Exited|Dead/ {print $1}' | xargs --no-run-if-empty docker rm"
+ ignore_errors: yes
+
+ - name: Remove dangling docker images
+ shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi"
+ ignore_errors: yes
+
+ - name: Remove non-running docker images
+ shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null"
+ ignore_errors: yes
+
+  # Leave off the '-t' for docker exec; with a tty allocated, the command does not work under ansible.
+ - name: update zabbix docker items
+ command: docker exec -i oso-rhel7-host-monitoring /usr/local/bin/cron-send-docker-metrics.py
+
+ # Get and show docker info again.
+ - name: Get docker info
+ command: docker info
+ register: docker_info
+
+ - name: Show docker info
+ debug:
+ var: docker_info.stdout_lines
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
new file mode 100644
index 000000000..d0264cde9
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+import pdb
+
+
+class FilterModule(object):
+ ''' Custom ansible filters '''
+
+ @staticmethod
+ def oo_pdb(arg):
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | oo_pdb }}"
+ '''
+ pdb.set_trace()
+ return arg
+
+ @staticmethod
+ def translate_volume_name(volumes, target_volume):
+ '''
+ This filter matches a device string /dev/sdX to /dev/xvdX
+ It will then return the AWS volume ID
+ '''
+ for vol in volumes:
+ translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
+ if target_volume.startswith(translated_name):
+ return vol["id"]
+
+ return None
+
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {
+ "translate_volume_name": self.translate_volume_name,
+ }
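The filter walks the volumes list returned by ec2_vol in list mode; each entry is expected to carry the attached device under attachment_set.device and the volume ID under id. A minimal sketch of the shape it traverses (values hypothetical):

volumes:
  - id: vol-0a1b2c3d                # returned when the translated device matches target_volume
    attachment_set:
      device: /dev/sdb              # compared as /dev/xvdb after the sd -> xvd translation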
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
new file mode 100644
index 000000000..d24e9cafa
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -0,0 +1,206 @@
+---
+# This playbook grows the docker VG on a node by:
+# * adding a new volume
+# * adding the volume to the existing VG
+# * pvmoving onto the new volume
+# * removing the old volume from the VG
+# * detaching the old volume
+# * marking the old volume in AWS with a "REMOVE ME" tag
+# * growing the docker LV to 90% of the VG
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+# export AWS_ACCESS_KEY_ID='XXXXX'
+# export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+# ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml
+#
+# Example:
+# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
+#
+# Notes:
+# * By default this will do a 200GB GP2 volume. This can be overridden with the "-e 'cli_volume_size=100'" variable
+# * This does a GP2 by default. Support for Provisioned IOPS has not been added
+# * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
+# * This can be done with NO downtime on the host
+# * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool". This is
+# the LV that gets created via the "docker-storage-setup" command
+#
+
+- name: Grow the docker volume group
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ vars:
+ cli_volume_type: gp2
+ cli_volume_size: 200
+# cli_volume_iops: "{{ 30 * cli_volume_size }}"
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+ - cli_volume_size
+
+ - debug:
+ var: hosts
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if Storage Driver (docker info) is devicemapper
+ shell: docker info | grep 'Storage Driver:.*devicemapper'
+ register: device_mapper_check
+ ignore_errors: yes
+
+ - debug:
+ var: device_mapper_check
+
+ - name: fail if we don't detect devicemapper
+ fail:
+ msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+ when: device_mapper_check.rc == 1
+
+  # docker-storage-setup creates an LV named docker-pool. Use the docker-pool LV to locate
+  # the volume group docker is using.
+ - name: Attempt to find the Volume Group that docker is using
+ shell: lvs | grep docker-pool | awk '{print $2}'
+ register: docker_vg_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_vg_name
+
+ - name: fail if we don't find a docker volume group
+ fail:
+ msg: Unable to find docker volume group. Please investigate manually.
+ when: docker_vg_name.stdout_lines|length != 1
+
+  # docker-storage-setup creates an LV named docker-pool. Use the docker-pool LV to locate
+  # the physical volume docker is using.
+  - name: Attempt to find the Physical Volume that docker is using
+ shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'"
+ register: docker_pv_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_pv_name
+
+ - name: fail if we don't find a docker physical volume
+ fail:
+ msg: Unable to find docker physical volume. Please investigate manually.
+ when: docker_pv_name.stdout_lines|length != 1
+
+
+ - name: get list of volumes from AWS
+ delegate_to: localhost
+ ec2_vol:
+ state: list
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ register: attached_volumes
+
+ - debug: var=attached_volumes
+
+ - name: get volume id of current docker volume
+ set_fact:
+ old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}"
+
+ - debug: var=old_docker_volume_id
+
+ - name: check to see if /dev/xvdc exists
+ command: test -e /dev/xvdc
+ register: xvdc_check
+ ignore_errors: yes
+
+ - debug: var=xvdc_check
+
+ - name: fail if /dev/xvdc already exists
+ fail:
+ msg: /dev/xvdc already exists. Please investigate
+ when: xvdc_check.rc == 0
+
+ - name: Create a volume and attach it
+ delegate_to: localhost
+ ec2_vol:
+ state: present
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ volume_size: "{{ cli_volume_size | default(30, True)}}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: /dev/xvdc
+ register: create_volume
+
+ - debug: var=create_volume
+
+ - name: Fail when problems creating volumes and attaching
+ fail:
+ msg: "Failed to create or attach volume msg: {{ create_volume.msg }}"
+ when: create_volume.msg is defined
+
+ - name: tag the vol with a name
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }}"
+ clusterid: "{{ ec2_tag_clusterid }}"
+ register: voltags
+
+ - name: check for attached drive
+ command: test -b /dev/xvdc
+ register: attachment_check
+ until: attachment_check.rc == 0
+ retries: 30
+ delay: 2
+
+ - name: partition the new drive and make it lvm
+ command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
+
+ - name: pvcreate /dev/xvdc
+ command: pvcreate /dev/xvdc1
+
+ - name: Extend the docker volume group
+ command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1
+
+ - name: pvmove onto new volume
+ command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
+ async: 43200
+ poll: 10
+
+ - name: Remove the old docker drive from the volume group
+ command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}"
+
+ - name: Remove the pv from the old drive
+ command: "pvremove {{ docker_pv_name.stdout }}"
+
+ - name: Extend the docker lvm
+ command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool"
+
+ - name: detach old docker volume
+ delegate_to: localhost
+ ec2_vol:
+ region: "{{ ec2_region }}"
+ id: "{{ old_docker_volume_id }}"
+ instance: None
+
+ - name: tag the old vol valid label
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }} REMOVE ME"
+ register: voltags
+
+ - name: Update the /etc/sysconfig/docker-storage-setup with new device
+ lineinfile:
+ dest: /etc/sysconfig/docker-storage-setup
+ regexp: ^DEVS=
+ line: DEVS=/dev/xvdc
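After the pvmove/vgreduce/lvextend sequence, a verification-only sketch in the same style as the detection tasks above would confirm the VG now sits on /dev/xvdc1:

  - name: show the physical volumes backing the docker VG
    shell: "pvs | grep {{ docker_vg_name.stdout }}"
    register: pv_layout
    changed_when: False

  - debug: var=pv_layout.stdout_lines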
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
index d250e6e69..2d2cae2b5 100644
--- a/playbooks/adhoc/noc/create_host.yml
+++ b/playbooks/adhoc/noc/create_host.yml
@@ -1,6 +1,8 @@
---
- name: 'Create a host object in zabbix'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
@@ -23,6 +25,8 @@
#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
- name: 'Create a host object in zabbix'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
index c0ec57ce1..8ad5fa0e2 100644
--- a/playbooks/adhoc/noc/create_maintenance.yml
+++ b/playbooks/adhoc/noc/create_maintenance.yml
@@ -2,6 +2,8 @@
#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
- name: 'Create a maintenace object in zabbix'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
diff --git a/playbooks/adhoc/noc/filter_plugins b/playbooks/adhoc/noc/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/adhoc/noc/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
index 4b94fa228..32fc7ce68 100644
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ b/playbooks/adhoc/noc/get_zabbix_problems.yml
@@ -1,6 +1,8 @@
---
- name: 'Get current hosts who have triggers that are alerting by trigger description'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
@@ -31,7 +33,7 @@
- add_host:
name: "{{ item }}"
groups: problem_hosts_group
- with_items: problem_hosts
+ with_items: "{{ problem_hosts }}"
- name: "Run on problem hosts"
hosts: problem_hosts_group
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
new file mode 100644
index 000000000..a3121d046
--- /dev/null
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -0,0 +1,6 @@
+---
+- hosts: masters[0]
+ roles:
+ - role: openshift_hosted_logging
+ openshift_hosted_logging_cleanup: no
+
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
new file mode 100644
index 000000000..10454ad11
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -0,0 +1,23 @@
+version: 0.1
+log:
+ level: debug
+http:
+ addr: :5000
+storage:
+ cache:
+ layerinfo: inmemory
+ s3:
+ accesskey: {{ aws_access_key }}
+ secretkey: {{ aws_secret_key }}
+ region: {{ aws_bucket_region }}
+ bucket: {{ aws_bucket_name }}
+ encrypt: true
+ secure: true
+ v4auth: true
+ rootdirectory: /registry
+auth:
+ openshift:
+ realm: openshift
+middleware:
+ repository:
+ - name: openshift
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
new file mode 100644
index 000000000..daf84e242
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -0,0 +1,78 @@
+---
+# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
+# Usage:
+# ansible-playbook s3_registry.yml -e clusterid="mycluster" -e aws_bucket="clusterid-docker" -e aws_region="us-east-1"
+#
+# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
+# The 'clusterid' is the short name of your cluster.
+
+- hosts: tag_clusterid_{{ clusterid }}:&tag_host-type_openshift-master
+ remote_user: root
+ gather_facts: False
+
+ vars:
+ aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+ aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+ aws_bucket_name: "{{ aws_bucket | default(clusterid ~ '-docker') }}"
+ aws_bucket_region: "{{ aws_region | default(lookup('env', 'S3_REGION') | default('us-east-1', true)) }}"
+ aws_create_bucket: "{{ aws_create | default(True) }}"
+ aws_tmp_path: "{{ aws_tmp_pathfile | default('/root/config.yml')}}"
+ aws_delete_tmp_file: "{{ aws_delete_tmp | default(True) }}"
+
+ tasks:
+
+ - name: Check for AWS creds
+ fail:
+ msg: "Couldn't find {{ item }} creds in ENV"
+ when: "{{ item }} == ''"
+ with_items:
+ - aws_access_key
+ - aws_secret_key
+
+ - name: Scale down registry
+ command: oc scale --replicas=0 dc/docker-registry
+
+ - name: Create S3 bucket
+ when: aws_create_bucket | bool
+ local_action:
+ module: s3 bucket="{{ aws_bucket_name }}" mode=create
+
+ - name: Set up registry environment variable
+ command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
+
+ - name: Generate docker registry config
+ template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
+
+ - name: Determine if new secrets are needed
+ command: oc get secrets
+ register: secrets
+
+ - name: Create registry secrets
+ command: oc secrets new dockerregistry /root/config.yml
+ when: "'dockerregistry' not in secrets.stdout"
+
+ - name: Determine if service account contains secrets
+ command: oc describe serviceaccount/registry
+ register: serviceaccount
+
+ - name: Add secrets to registry service account
+ command: oc secrets add serviceaccount/registry secrets/dockerregistry
+ when: "'dockerregistry' not in serviceaccount.stdout"
+
+ - name: Determine if deployment config contains secrets
+ command: oc volume dc/docker-registry --list
+ register: dc
+
+ - name: Add secrets to registry deployment config
+ command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
+ when: "'dockersecrets' not in dc.stdout"
+
+ - name: Wait for deployment config to take effect before scaling up
+ pause: seconds=30
+
+ - name: Scale up registry
+ command: oc scale --replicas=1 dc/docker-registry
+
+ - name: Delete temporary config file
+ file: path={{ aws_tmp_path }} state=absent
+ when: aws_delete_tmp_file | bool
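The play reads its credentials from the S3_ACCESS_KEY_ID and S3_SECRET_ACCESS_KEY environment variables (see the vars block above), so a typical invocation exports them first; key values here are placeholders:

export S3_ACCESS_KEY_ID='XXXXX'
export S3_SECRET_ACCESS_KEY='XXXXXX'
ansible-playbook s3_registry.yml -e clusterid="mycluster" -e aws_bucket="mycluster-docker" -e aws_region="us-east-1"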
diff --git a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
new file mode 100755
index 000000000..08e8f8968
--- /dev/null
+++ b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
@@ -0,0 +1,52 @@
+#!/usr/bin/ansible-playbook
+---
+#example run:
+# ansible-playbook -e "host=ops-node-compute-abcde" oo-sdn-restart.yml
+#
+
+- name: Check vars
+ hosts: localhost
+ gather_facts: false
+
+ pre_tasks:
+ - fail:
+ msg: "Playbook requires host to be set"
+ when: host is not defined or host == ''
+
+- name: Restart openshift/docker (and monitoring containers)
+ hosts: oo_version_3:&oo_name_{{ host }}
+ gather_facts: false
+ user: root
+
+ tasks:
+ - name: stop openshift/docker
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - atomic-openshift-node
+ - docker
+
+ - name: restart openvswitch
+ service:
+ name: openvswitch
+ state: restarted
+
+ - name: wait 5 sec
+ pause:
+ seconds: 5
+
+ - name: start openshift/docker
+ service:
+ name: "{{ item }}"
+ state: started
+ with_items:
+ - atomic-openshift-node
+ - docker
+
+ - name: start monitoring containers
+ service:
+ name: "{{ item }}"
+ state: restarted
+ with_items:
+ - oso-rhel7-host-monitoring
diff --git a/playbooks/adhoc/setupnfs.yml b/playbooks/adhoc/setupnfs.yml
new file mode 100644
index 000000000..fd489dc70
--- /dev/null
+++ b/playbooks/adhoc/setupnfs.yml
@@ -0,0 +1,21 @@
+---
+### This playbook is old and we are currently not using NFS.
+- hosts: tag_Name_nfs-v3-stg
+ become: no
+ remote_user: root
+ gather_facts: no
+ roles:
+ - role: openshift_storage_nfs_lvm
+ mount_dir: /exports/stg-black
+ volume_prefix: "kwoodsontest"
+ volume_size: 5
+ volume_num_start: 222
+ number_of_volumes: 3
+ tasks:
+ - fetch:
+ dest: json/
+ src: /root/"{{ item }}"
+ with_items:
+ - persistent-volume.kwoodsontest5g0222.json
+ - persistent-volume.kwoodsontest5g0223.json
+ - persistent-volume.kwoodsontest5g0224.json
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..4ea639cbe
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,406 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible. This includes:
+#
+# configuration
+# containers
+# example templates and imagestreams
+# images
+# RPMs
+---
+- hosts: OSEv3:children
+ become: yes
+ tasks:
+ - name: Detecting Operating System
+ shell: ls /run/ostree-booted
+ ignore_errors: yes
+ failed_when: false
+ register: ostree_output
+
+ # Since we're not calling openshift_facts we'll do this for now
+ - set_fact:
+ is_atomic: "{{ ostree_output.rc == 0 }}"
+ - set_fact:
+ is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+
+# Stop services on all hosts prior to removing files.
+- hosts: nodes
+ become: yes
+ tasks:
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-node
+ - atomic-openshift-node
+ - openshift-node
+ - openvswitch
+ - origin-node
+ failed_when: false
+
+- hosts: masters
+ become: yes
+ tasks:
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-master
+ - atomic-openshift-master
+ - atomic-openshift-master-api
+ - atomic-openshift-master-controllers
+ - openshift-master
+ - openshift-master-api
+ - openshift-master-controllers
+ - origin-master
+ - origin-master-api
+ - origin-master-controllers
+ - pcsd
+ failed_when: false
+
+- hosts: etcd
+ become: yes
+ tasks:
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - etcd
+ failed_when: false
+
+- hosts: lb
+ become: yes
+ tasks:
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - haproxy
+ failed_when: false
+
+- hosts: nodes
+ become: yes
+ tasks:
+ - name: unmask services
+ command: systemctl unmask "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - firewalld
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
+ - kubernetes-client
+ - openshift
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-clients
+ - origin-node
+ - origin-sdn-ovs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - name: Remove flannel package
+ action: "{{ ansible_pkg_mgr }} name=flannel state=absent"
+ when: openshift_use_flannel | default(false) | bool and not is_atomic | bool
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
+
+ - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ changed_when: False
+ failed_when: False
+ with_items:
+ - openshift-enterprise
+ - atomic-enterprise
+ - origin
+
+ - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
+ changed_when: False
+ failed_when: False
+ register: exited_containers_to_delete
+ with_items:
+ - aep3.*/aep
+ - aep3.*/node
+ - aep3.*/openvswitch
+ - openshift3/ose
+ - openshift3/node
+ - openshift3/openvswitch
+ - openshift/origin
+
+ - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ exited_containers_to_delete.results }}"
+
+ - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry\.access\..*redhat\.com/openshift3
+ - registry\.access\..*redhat\.com/aep3
+ - registry\.qe\.openshift\.com/.*
+ - registry\.access\..*redhat\.com/rhel7/etcd
+ - docker.io/openshift
+ when: openshift_uninstall_images | default(True) | bool
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+ when: openshift_uninstall_images | default(True) | bool
+
+ - name: Remove sdn drop files
+ file:
+ path: /run/openshift-sdn
+ state: absent
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/atomic-enterprise
+ - /etc/dnsmasq.d/origin-dns.conf
+ - /etc/dnsmasq.d/origin-upstream-dns.conf
+ - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
+ - /etc/openshift
+ - /etc/openshift-sdn
+ - /etc/origin
+ - /etc/sysconfig/atomic-enterprise-node
+ - /etc/sysconfig/atomic-openshift-node
+ - /etc/sysconfig/atomic-openshift-node-dep
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/openshift-node-dep
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/origin-node
+ - /etc/sysconfig/origin-node
+ - /etc/sysconfig/origin-node-dep
+ - /etc/systemd/system/atomic-openshift-node-dep.service
+ - /etc/systemd/system/atomic-openshift-node.service
+ - /etc/systemd/system/atomic-openshift-node.service.wants
+ - /etc/systemd/system/docker.service.d/docker-sdn-ovs.conf
+ - /etc/systemd/system/openvswitch.service
+ - /etc/systemd/system/origin-node-dep.service
+ - /etc/systemd/system/origin-node.service
+ - /etc/systemd/system/origin-node.service.wants
+ - /run/openshift-sdn
+ - /var/lib/atomic-enterprise
+ - /var/lib/openshift
+ - /var/lib/origin
+
+ - name: restart docker
+ service: name=docker state=restarted
+
+ - name: restart NetworkManager
+ service: name=NetworkManager state=restarted
+
+
+- hosts: masters
+ become: yes
+ tasks:
+ - name: unmask services
+ command: systemctl unmask "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - firewalld
+ - atomic-openshift-master
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
+ - corosync
+ - kubernetes-client
+ - openshift
+ - openshift-master
+ - origin
+ - origin-clients
+ - origin-master
+ - pacemaker
+ - pcs
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - "~{{ ansible_ssh_user }}/.kube"
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/atomic-enterprise
+ - /etc/corosync
+ - /etc/openshift
+ - /etc/openshift-sdn
+ - /etc/origin
+ - /etc/systemd/system/atomic-openshift-master.service
+ - /etc/systemd/system/atomic-openshift-master-api.service
+ - /etc/systemd/system/atomic-openshift-master-controllers.service
+ - /etc/systemd/system/origin-master.service
+ - /etc/systemd/system/origin-master-api.service
+ - /etc/systemd/system/origin-master-controllers.service
+ - /etc/systemd/system/openvswitch.service
+ - /etc/sysconfig/atomic-enterprise-master
+ - /etc/sysconfig/atomic-enterprise-master-api
+ - /etc/sysconfig/atomic-enterprise-master-controllers
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/atomic-openshift-master-api
+ - /etc/sysconfig/atomic-openshift-master-controllers
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-master-api
+ - /etc/sysconfig/origin-master-controllers
+ - /etc/sysconfig/openshift-master
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-master-api
+ - /etc/sysconfig/origin-master-controllers
+ - /root/.kube
+ - /usr/share/openshift/examples
+ - /var/lib/atomic-enterprise
+ - /var/lib/openshift
+ - /var/lib/origin
+ - /var/lib/pacemaker
+ - /var/lib/pcsd
+ - /usr/lib/systemd/system/atomic-openshift-master-api.service
+ - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
+ - /usr/lib/systemd/system/origin-master-api.service
+ - /usr/lib/systemd/system/origin-master-controllers.service
+ - /usr/local/bin/openshift
+ - /usr/local/bin/oadm
+ - /usr/local/bin/oc
+ - /usr/local/bin/kubectl
+ - /etc/flannel
+
+  # Since we are potentially removing the systemd unit files for the separated
+  # master-api and master-controllers services, we need to reload the
+  # systemd configuration manager
+ - name: Reload systemd manager configuration
+ command: systemctl daemon-reload
+
+- hosts: etcd
+ become: yes
+ tasks:
+ - name: unmask services
+ command: systemctl unmask "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - etcd
+ - etcd3
+ - firewalld
+
+ - name: Stop additional atomic services
+ service: name={{ item }} state=stopped
+ when: is_containerized | bool
+ with_items:
+ - etcd_container
+ failed_when: false
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - etcd
+ - etcd3
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/etcd
+ - /etc/systemd/system/etcd_container.service
+ - /etc/profile.d/etcdctl.sh
+
+  # Intentionally using the rm command rather than the file module because if someone had mounted a
+  # filesystem at /var/lib/etcd, the contents would not otherwise be removed correctly
+ - name: Remove etcd data
+ shell: rm -rf /var/lib/etcd/*
+ args:
+ warn: no
+ failed_when: false
+
+- hosts: lb
+ become: yes
+ tasks:
+ - name: unmask services
+ command: systemctl unmask "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - firewalld
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - haproxy
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /var/lib/haproxy
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
new file mode 100644
index 000000000..09f7c76cc
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
@@ -0,0 +1,60 @@
+---
+- hosts: localhost
+ gather_facts: no
+ connection: local
+ become: no
+ vars:
+ g_server: http://localhost:8080/zabbix/api_jsonrpc.php
+ g_user: ''
+ g_password: ''
+
+ roles:
+ - lib_zabbix
+
+ post_tasks:
+ - name: CLEAN List template for heartbeat
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ state: list
+ name: 'Template Heartbeat'
+ register: templ_heartbeat
+
+ - name: CLEAN List template app zabbix server
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ state: list
+ name: 'Template App Zabbix Server'
+ register: templ_zabbix_server
+
+ - name: CLEAN List template app zabbix server
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ state: list
+ name: 'Template App Zabbix Agent'
+ register: templ_zabbix_agent
+
+ - name: CLEAN List all templates
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ state: list
+ register: templates
+
+ - debug: var=templ_heartbeat.results
+
+ - name: Remove templates if heartbeat template is missing
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
+ when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins
new file mode 120000
index 000000000..b0b7a3414
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
new file mode 100755
index 000000000..0fe65b338
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
@@ -0,0 +1,7 @@
+#!/usr/bin/env ansible-playbook
+---
+- include: clean_zabbix.yml
+ vars:
+ g_server: http://localhost/zabbix/api_jsonrpc.php
+ g_user: Admin
+ g_password: zabbix
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
new file mode 100755
index 000000000..2f1d003ff
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
@@ -0,0 +1,19 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+ gather_facts: no
+ connection: local
+ become: no
+ vars:
+ g_server: http://localhost/zabbix/api_jsonrpc.php
+ g_user: Admin
+ g_password: zabbix
+ g_zbx_scriptrunner_user: scriptrunner
+ g_zbx_scriptrunner_bastion_host: specialhost.example.com
+ roles:
+ - role: os_zabbix
+ ozb_server: "{{ g_server }}"
+ ozb_user: "{{ g_user }}"
+ ozb_password: "{{ g_password }}"
+ ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
+ ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"
diff --git a/playbooks/adhoc/noc/roles b/playbooks/adhoc/zabbix_setup/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/adhoc/noc/roles
+++ b/playbooks/adhoc/zabbix_setup/roles