Diffstat (limited to 'playbooks/adhoc')
25 files changed, 1349 insertions, 1 deletions
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml new file mode 100644 index 000000000..c14d08e87 --- /dev/null +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -0,0 +1,29 @@ +# This deletes *ALL* Docker images, and uninstalls OpenShift and +# Atomic Enterprise RPMs. It is primarily intended for use +# with the tutorial as well as for developers to reset state. +# +--- +- include: uninstall.yml + +- hosts: + - OSEv3:children + + sudo: yes + + tasks: + - shell: docker ps -a -q | xargs docker stop + changed_when: False + failed_when: False + + - shell: docker ps -a -q| xargs docker rm + changed_when: False + failed_when: False + + - shell: docker images -q |xargs docker rmi + changed_when: False + failed_when: False + + - user: name={{ item }} state=absent remove=yes + with_items: + - alice + - joe diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml new file mode 100644 index 000000000..4f0ef7a75 --- /dev/null +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -0,0 +1,159 @@ +--- +#example run: +# ansible-playbook -e "cli_volume_size=1" \ +# -e "cli_device_name=/dev/xvdf" \ +# -e "cli_hosttype=master" \ +# -e "cli_environment=ops" \ +# create_pv.yaml +# FIXME: we need to change "environment" to "clusterid" as that's what it really is now. +# +- name: Create a volume and attach it to master + hosts: localhost + gather_facts: no + vars: + cli_volume_type: gp2 + cli_volume_iops: '' + oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] | + intersect(groups['tag_environment_' ~ cli_environment]) | + first }}" + pre_tasks: + - fail: + msg: "This playbook requires {{item}} to be set." + when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_volume_size + - cli_device_name + - cli_hosttype + - cli_environment + + - name: set oo_name fact + set_fact: + oo_name: "{{ oo_name }}" + + + - name: Select a single master to run this on + add_host: + hostname: "{{ oo_name }}" + ansible_ssh_host: "{{ hostvars[oo_name].ec2_public_dns_name }}" + groups: oo_master + + - name: Create a volume and attach it + ec2_vol: + state: present + instance: "{{ hostvars[oo_name]['ec2_id'] }}" + region: "{{ hostvars[oo_name]['ec2_region'] }}" + volume_size: "{{ cli_volume_size }}" + volume_type: "{{ cli_volume_type }}" + device_name: "{{ cli_device_name }}" + iops: "{{ cli_volume_iops }}" + register: vol + + - debug: var=vol + + - name: tag the vol with a name + ec2_tag: region={{ hostvars[oo_name]['ec2_region'] }} resource={{vol.volume_id}} + args: + tags: + Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}" + env: "{{cli_environment}}" + register: voltags + + - debug: var=voltags + +- name: Configure the drive + gather_facts: no + hosts: oo_master + user: root + connection: ssh + vars: + pv_tmpdir: /tmp/persistentvolumes + + post_tasks: + - name: Setting facts for template + set_fact: + pv_name: "pv-{{cli_volume_size}}-{{ hostvars[hostvars.localhost.oo_name]['ec2_tag_Name'] }}-{{hostvars.localhost.vol.volume_id }}" + vol_az: "{{ hostvars[hostvars.localhost.oo_name]['ec2_placement'] }}" + vol_id: "{{ hostvars.localhost.vol.volume_id }}" + vol_size: "{{ cli_volume_size }}" + pv_mntdir: "{{ pv_tmpdir }}/mnt-{{ 1000 | random }}" + + - set_fact: + pv_template: "{{ pv_tmpdir }}/{{ pv_name }}.yaml" + + - name: "Mkdir {{ pv_tmpdir }}" + file: + state: directory + path: "{{ pv_tmpdir }}" + mode: '0750' + + - name: "Mkdir {{ pv_mntdir }}" + file: + state: directory + path: "{{ pv_mntdir }}" 
+ mode: '0750' + + - name: Create pv file from template + template: + src: ./pv-template.j2 + dest: "{{ pv_template }}" + owner: root + mode: '0640' + + - name: mkfs + filesystem: + dev: "{{ cli_device_name }}" + fstype: ext4 + + - name: Mount the dev + mount: + name: "{{ pv_mntdir }}" + src: "{{ cli_device_name }}" + fstype: ext4 + state: mounted + + - name: chgrp g+rwXs + file: + path: "{{ pv_mntdir }}" + mode: 'g+rwXs' + recurse: yes + seuser: system_u + serole: object_r + setype: svirt_sandbox_file_t + selevel: s0 + + - name: umount + mount: + name: "{{ pv_mntdir }}" + src: "{{ cli_device_name }}" + state: unmounted + fstype: ext4 + + - name: remove from fstab + mount: + name: "{{ pv_mntdir }}" + src: "{{ cli_device_name }}" + state: absent + fstype: ext4 + + - name: detach drive + delegate_to: localhost + ec2_vol: + region: "{{ hostvars[hostvars.localhost.oo_name].ec2_region }}" + id: "{{ hostvars.localhost.vol.volume_id }}" + instance: None + + - name: "Remove {{ pv_mntdir }}" + file: + state: absent + path: "{{ pv_mntdir }}" + + # We have to use the shell module because we can't set env vars with the command module. + - name: "Place PV into oc" + shell: "KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc create -f {{ pv_template | quote }}" + register: oc_output + + - debug: var=oc_output + + - fail: + msg: "Failed to add {{ pv_template }} to master." + when: oc_output.rc != 0 diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2 new file mode 100644 index 000000000..5654ef6c4 --- /dev/null +++ b/playbooks/adhoc/create_pv/pv-template.j2 @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ pv_name }} + labels: + type: ebs +spec: + capacity: + storage: {{ vol_size }}Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Recycle + awsElasticBlockStore: + volumeID: aws://{{ vol_az }}/{{ vol_id }} + fsType: ext4 diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup new file mode 100644 index 000000000..059058823 --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup @@ -0,0 +1,2 @@ +DEVS=/dev/xvdb +VG=docker_vg diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml new file mode 100644 index 000000000..b6a2d2f26 --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -0,0 +1,142 @@ +--- +# This playbook coverts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker) +# in AWS. This adds an additional EBS volume and creates the Volume Group on this EBS volume to use. +# +# To run: +# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment +# export AWS_ACCESS_KEY_ID='XXXXX' +# export AWS_SECRET_ACCESS_KEY='XXXXXX' +# +# 2. run the playbook: +# ansible-playbook -e 'cli_tag_name=<tag-name>' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml +# +# Example: +# ansible-playbook -e 'cli_tag_name=ops-master-12345' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml +# +# Notes: +# * By default this will do a 30GB volume. +# * iops are calculated by Disk Size * 30. e.g ( 30GB * 30) = 900 iops +# * This will remove /var/lib/docker! 
+# * You may need to re-deploy docker images after this is run (like monitoring) +# + +- name: Fix docker to have a provisioned iops drive + hosts: "tag_Name_{{ cli_tag_name }}" + user: root + connection: ssh + gather_facts: no + + vars: + cli_volume_type: gp2 + cli_volume_size: 30 + + pre_tasks: + - fail: + msg: "This playbook requires {{item}} to be set." + when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_tag_name + - cli_volume_size + + - debug: + var: hosts + + - name: start docker + service: + name: docker + state: started + + - name: Determine if loopback + shell: docker info | grep 'Data file:.*loop' + register: loop_device_check + ignore_errors: yes + + - debug: + var: loop_device_check + + - name: fail if we don't detect loopback + fail: + msg: loopback not detected! Please investigate manually. + when: loop_device_check.rc == 1 + + - name: stop zagg client monitoring container + service: + name: oso-rhel7-zagg-client + state: stopped + ignore_errors: yes + + - name: stop pcp client monitoring container + service: + name: oso-f22-host-monitoring + state: stopped + ignore_errors: yes + + - name: stop docker + service: + name: docker + state: stopped + + - name: delete /var/lib/docker + command: rm -rf /var/lib/docker + + - name: remove /var/lib/docker + command: rm -rf /var/lib/docker + + - name: check to see if /dev/xvdb exists + command: test -e /dev/xvdb + register: xvdb_check + ignore_errors: yes + + - debug: var=xvdb_check + + - name: fail if /dev/xvdb already exists + fail: + msg: /dev/xvdb already exists. Please investigate + when: xvdb_check.rc == 0 + + - name: Create a volume and attach it + delegate_to: localhost + ec2_vol: + state: present + instance: "{{ ec2_id }}" + region: "{{ ec2_region }}" + volume_size: "{{ cli_volume_size | default(30, True)}}" + volume_type: "{{ cli_volume_type }}" + device_name: /dev/xvdb + register: vol + + - debug: var=vol + + - name: tag the vol with a name + delegate_to: localhost + ec2_tag: region={{ ec2_region }} resource={{ vol.volume_id }} + args: + tags: + Name: "{{ ec2_tag_Name }}" + env: "{{ ec2_tag_environment }}" + register: voltags + + - name: Wait for volume to attach + pause: + seconds: 30 + + - name: copy the docker-storage-setup config file + copy: + src: docker-storage-setup + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0664 + + - name: docker storage setup + command: docker-storage-setup + register: setup_output + + - debug: var=setup_output + + - name: start docker + command: systemctl start docker.service + register: dockerstart + + - debug: var=dockerstart + diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml new file mode 100755 index 000000000..72fcd77b3 --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml @@ -0,0 +1,115 @@ +#!/usr/bin/ansible-playbook +--- +# This playbook coverts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker). +# +# It requires the block device to be already provisioned and attached to the host. This is a generic playbook, +# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory. 
+# +# To run: +# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device> +# +# Example: +# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb +# +# Notes: +# * This will remove /var/lib/docker! +# * You may need to re-deploy docker images after this is run (like monitoring) + +- name: Fix docker to have a provisioned iops drive + hosts: "{{ cli_name }}" + user: root + connection: ssh + gather_facts: no + + pre_tasks: + - fail: + msg: "This playbook requires {{item}} to be set." + when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_docker_device + + - name: start docker + service: + name: docker + state: started + + - name: Determine if loopback + shell: docker info | grep 'Data file:.*loop' + register: loop_device_check + ignore_errors: yes + + - debug: + var: loop_device_check + + - name: fail if we don't detect loopback + fail: + msg: loopback not detected! Please investigate manually. + when: loop_device_check.rc == 1 + + - name: stop zagg client monitoring container + service: + name: oso-rhel7-zagg-client + state: stopped + ignore_errors: yes + + - name: stop pcp client monitoring container + service: + name: oso-f22-host-monitoring + state: stopped + ignore_errors: yes + + - name: "check to see if {{ cli_docker_device }} exists" + command: "test -e {{ cli_docker_device }}" + register: docker_dev_check + ignore_errors: yes + + - debug: var=docker_dev_check + + - name: "fail if {{ cli_docker_device }} doesn't exist" + fail: + msg: "{{ cli_docker_device }} doesn't exist. Please investigate" + when: docker_dev_check.rc != 0 + + - name: stop docker + service: + name: docker + state: stopped + + - name: delete /var/lib/docker + command: rm -rf /var/lib/docker + + - name: remove /var/lib/docker + command: rm -rf /var/lib/docker + + - name: copy the docker-storage-setup config file + copy: + content: > + DEVS={{ cli_docker_device }} + VG=docker_vg + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0664 + + - name: docker storage setup + command: docker-storage-setup + register: setup_output + + - debug: var=setup_output + + - name: extend the vg + command: lvextend -l 90%VG /dev/docker_vg/docker-pool + register: extend_output + + - debug: var=extend_output + + - name: start docker + service: + name: docker + state: restarted + + - name: docker info + command: docker info + register: dockerinfo + + - debug: var=dockerinfo diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml new file mode 100644 index 000000000..a19291a9f --- /dev/null +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -0,0 +1,69 @@ +--- +# This playbook attempts to cleanup unwanted docker files to help alleviate docker disk space issues. +# +# To run: +# +# 1. run the playbook: +# +# ansible-playbook -e 'cli_tag_name=<tag-name>' docker_storage_cleanup.yml +# +# Example: +# +# ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' docker_storage_cleanup.yml +# +# Notes: +# * This *should* not interfere with running docker images +# + +- name: Clean up Docker Storage + gather_facts: no + hosts: "tag_Name_{{ cli_tag_name }}" + user: root + connection: ssh + + pre_tasks: + + - fail: + msg: "This playbook requires {{item}} to be set." 
+ when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_tag_name + + - name: Ensure docker is running + service: + name: docker + state: started + enabled: yes + + - name: Get docker info + command: docker info + register: docker_info + + - name: Show docker info + debug: + var: docker_info.stdout_lines + + - name: Remove exited and dead containers + shell: "docker ps -a | awk '/Exited|Dead/ {print $1}' | xargs --no-run-if-empty docker rm" + ignore_errors: yes + + - name: Remove dangling docker images + shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi" + ignore_errors: yes + + - name: Remove non-running docker images + shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null" + ignore_errors: yes + + # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support + - name: update zabbix docker items + command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py + + # Get and show docker info again. + - name: Get docker info + command: docker info + register: docker_info + + - name: Show docker info + debug: + var: docker_info.stdout_lines diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py new file mode 100644 index 000000000..d0264cde9 --- /dev/null +++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py @@ -0,0 +1,41 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 +''' +Custom filters for use in openshift-ansible +''' + +import pdb + + +class FilterModule(object): + ''' Custom ansible filters ''' + + @staticmethod + def oo_pdb(arg): + ''' This pops you into a pdb instance where arg is the data passed in + from the filter. + Ex: "{{ hostvars | oo_pdb }}" + ''' + pdb.set_trace() + return arg + + @staticmethod + def translate_volume_name(volumes, target_volume): + ''' + This filter matches a device string /dev/sdX to /dev/xvdX + It will then return the AWS volume ID + ''' + for vol in volumes: + translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd") + if target_volume.startswith(translated_name): + return vol["id"] + + return None + + + def filters(self): + ''' returns a mapping of filters to methods ''' + return { + "translate_volume_name": self.translate_volume_name, + } diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml new file mode 100644 index 000000000..63d473146 --- /dev/null +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -0,0 +1,206 @@ +--- +# This playbook grows the docker VG on a node by: +# * add a new volume +# * add volume to the existing VG. +# * pv move to the new volume. +# * remove old volume +# * detach volume +# * mark old volume in AWS with "REMOVE ME" tag +# * grow docker LVM to 90% of the VG +# +# To run: +# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment +# export AWS_ACCESS_KEY_ID='XXXXX' +# export AWS_SECRET_ACCESS_KEY='XXXXXX' +# +# 2. run the playbook: +# ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml +# +# Example: +# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml +# +# Notes: +# * By default this will do a 55GB GP2 volume. 
The can be overidden with the "-e 'cli_volume_size=100'" variable +# * This does a GP2 by default. Support for Provisioned IOPS has not been added +# * This will assign the new volume to /dev/xvdc. This is not variablized, yet. +# * This can be done with NO downtime on the host +# * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool". This is +# the LV that gets created via the "docker-storage-setup" command +# + +- name: Grow the docker volume group + hosts: "tag_Name_{{ cli_tag_name }}" + user: root + connection: ssh + gather_facts: no + + vars: + cli_volume_type: gp2 + cli_volume_size: 55 +# cli_volume_iops: "{{ 30 * cli_volume_size }}" + + pre_tasks: + - fail: + msg: "This playbook requires {{item}} to be set." + when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_tag_name + - cli_volume_size + + - debug: + var: hosts + + - name: start docker + service: + name: docker + state: started + + - name: Determine if Storage Driver (docker info) is devicemapper + shell: docker info | grep 'Storage Driver:.*devicemapper' + register: device_mapper_check + ignore_errors: yes + + - debug: + var: device_mapper_check + + - name: fail if we don't detect devicemapper + fail: + msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually. + when: device_mapper_check.rc == 1 + + # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test + # and find the volume group. + - name: Attempt to find the Volume Group that docker is using + shell: lvs | grep docker-pool | awk '{print $2}' + register: docker_vg_name + ignore_errors: yes + + - debug: + var: docker_vg_name + + - name: fail if we don't find a docker volume group + fail: + msg: Unable to find docker volume group. Please investigate manually. + when: docker_vg_name.stdout_lines|length != 1 + + # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test + # and find the physical volume. + - name: Attempt to find the Phyisical Volume that docker is using + shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'" + register: docker_pv_name + ignore_errors: yes + + - debug: + var: docker_pv_name + + - name: fail if we don't find a docker physical volume + fail: + msg: Unable to find docker physical volume. Please investigate manually. + when: docker_pv_name.stdout_lines|length != 1 + + + - name: get list of volumes from AWS + delegate_to: localhost + ec2_vol: + state: list + instance: "{{ ec2_id }}" + region: "{{ ec2_region }}" + register: attached_volumes + + - debug: var=attached_volumes + + - name: get volume id of current docker volume + set_fact: + old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}" + + - debug: var=old_docker_volume_id + + - name: check to see if /dev/xvdc exists + command: test -e /dev/xvdc + register: xvdc_check + ignore_errors: yes + + - debug: var=xvdc_check + + - name: fail if /dev/xvdc already exists + fail: + msg: /dev/xvdc already exists. 
Please investigate + when: xvdc_check.rc == 0 + + - name: Create a volume and attach it + delegate_to: localhost + ec2_vol: + state: present + instance: "{{ ec2_id }}" + region: "{{ ec2_region }}" + volume_size: "{{ cli_volume_size | default(30, True)}}" + volume_type: "{{ cli_volume_type }}" + device_name: /dev/xvdc + register: create_volume + + - debug: var=create_volume + + - name: Fail when problems creating volumes and attaching + fail: + msg: "Failed to create or attach volume msg: {{ create_volume.msg }}" + when: create_volume.msg is defined + + - name: tag the vol with a name + delegate_to: localhost + ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }} + args: + tags: + Name: "{{ ec2_tag_Name }}" + env: "{{ ec2_tag_environment }}" + register: voltags + + - name: check for attached drive + command: test -b /dev/xvdc + register: attachment_check + until: attachment_check.rc == 0 + retries: 30 + delay: 2 + + - name: partition the new drive and make it lvm + command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm + + - name: pvcreate /dev/xvdc + command: pvcreate /dev/xvdc1 + + - name: Extend the docker volume group + command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1 + + - name: pvmove onto new volume + command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1" + async: 43200 + poll: 10 + + - name: Remove the old docker drive from the volume group + command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}" + + - name: Remove the pv from the old drive + command: "pvremove {{ docker_pv_name.stdout }}" + + - name: Extend the docker lvm + command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool" + + - name: detach old docker volume + delegate_to: localhost + ec2_vol: + region: "{{ ec2_region }}" + id: "{{ old_docker_volume_id }}" + instance: None + + - name: tag the old vol valid label + delegate_to: localhost + ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}} + args: + tags: + Name: "{{ ec2_tag_Name }} REMOVE ME" + register: voltags + + - name: Update the /etc/sysconfig/docker-storage-setup with new device + lineinfile: + dest: /etc/sysconfig/docker-storage-setup + regexp: ^DEVS= + line: DEVS=/dev/xvdc diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml new file mode 100644 index 000000000..d250e6e69 --- /dev/null +++ b/playbooks/adhoc/noc/create_host.yml @@ -0,0 +1,55 @@ +--- +- name: 'Create a host object in zabbix' + hosts: localhost + gather_facts: no + roles: + - os_zabbix + post_tasks: + + - zbxapi: + server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php + zbx_class: Template + state: list + params: + host: ctr_test_kwoodson + filter: + host: + - ctr_kwoodson_test_tmpl + + register: tmpl_results + + - debug: var=tmpl_results + +#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml +- name: 'Create a host object in zabbix' + hosts: localhost + gather_facts: no + roles: + - os_zabbix + post_tasks: + + - zbxapi: + server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php + zbx_class: Host + state: absent + params: + host: ctr_test_kwoodson + interfaces: + - type: 1 + main: 1 + useip: 1 + ip: 127.0.0.1 + dns: "" + port: 10050 + groups: + - groupid: 1 + templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}" + output: extend + filter: + host: + - ctr_test_kwoodson + + 
register: host_results + + - debug: var=host_results + diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml new file mode 100644 index 000000000..c0ec57ce1 --- /dev/null +++ b/playbooks/adhoc/noc/create_maintenance.yml @@ -0,0 +1,36 @@ +--- +#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml +- name: 'Create a maintenace object in zabbix' + hosts: localhost + gather_facts: no + roles: + - os_zabbix + vars: + oo_hostids: '' + oo_groupids: '' + post_tasks: + - assert: + that: oo_desc is defined + + - zbxapi: + server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php + zbx_class: Maintenance + state: present + params: + name: "{{ oo_name }}" + description: "{{ oo_desc }}" + active_since: "{{ oo_start }}" + active_till: "{{ oo_stop }}" + maintenance_type: "0" + output: extend + hostids: "{{ oo_hostids.split(',') | default([]) }}" +#groupids: "{{ oo_groupids.split(',') | default([]) }}" + timeperiods: + - start_time: "{{ oo_start }}" + period: "{{ oo_stop }}" + selectTimeperiods: extend + + register: maintenance + + - debug: var=maintenance + diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml index 02bffc1d2..4b94fa228 100644 --- a/playbooks/adhoc/noc/get_zabbix_problems.yml +++ b/playbooks/adhoc/noc/get_zabbix_problems.yml @@ -11,7 +11,7 @@ - zbxapi: server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php zbx_class: Trigger - action: get + state: list params: only_true: true output: extend diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2 new file mode 100644 index 000000000..acfa89515 --- /dev/null +++ b/playbooks/adhoc/s3_registry/s3_registry.j2 @@ -0,0 +1,20 @@ +version: 0.1 +log: + level: debug +http: + addr: :5000 +storage: + cache: + layerinfo: inmemory + s3: + accesskey: {{ aws_access_key }} + secretkey: {{ aws_secret_key }} + region: us-east-1 + bucket: {{ clusterid }}-docker + encrypt: true + secure: true + v4auth: true + rootdirectory: /registry +middleware: + repository: + - name: openshift diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml new file mode 100644 index 000000000..4dcef1a42 --- /dev/null +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -0,0 +1,71 @@ +--- +# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage. +# Usage: +# ansible-playbook s3_registry.yml -e clusterid="mycluster" +# +# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role. +# The 'clusterid' is the short name of your cluster. 
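The play that follows reads its AWS credentials from the S3_ACCESS_KEY_ID and S3_SECRET_ACCESS_KEY environment variables, so those must be exported before the run. A minimal sketch of an invocation, with placeholder key values and cluster name (not part of this change):

  export S3_ACCESS_KEY_ID='AKIAXXXXXXXX'    # access key of the dedicated S3-only user
  export S3_SECRET_ACCESS_KEY='XXXXXXXX'    # its matching secret key
  ansible-playbook s3_registry.yml -e clusterid="mycluster"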
+ +- hosts: tag_env-host-type_{{ clusterid }}-openshift-master + remote_user: root + gather_facts: False + + vars: + aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}" + aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}" + + tasks: + + - name: Check for AWS creds + fail: + msg: "Couldn't find {{ item }} creds in ENV" + when: "{{ item }} == ''" + with_items: + - aws_access_key + - aws_secret_key + + - name: Scale down registry + command: oc scale --replicas=0 dc/docker-registry + + - name: Create S3 bucket + local_action: + module: s3 bucket="{{ clusterid }}-docker" mode=create + + - name: Set up registry environment variable + command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml + + - name: Generate docker registry config + template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600 + + - name: Determine if new secrets are needed + command: oc get secrets + register: secrets + + - name: Create registry secrets + command: oc secrets new dockerregistry /root/config.yml + when: "'dockerregistry' not in secrets.stdout" + + - name: Determine if service account contains secrets + command: oc describe serviceaccount/registry + register: serviceaccount + + - name: Add secrets to registry service account + command: oc secrets add serviceaccount/registry secrets/dockerregistry + when: "'dockerregistry' not in serviceaccount.stdout" + + - name: Determine if deployment config contains secrets + command: oc volume dc/docker-registry --list + register: dc + + - name: Add secrets to registry deployment config + command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry + when: "'dockersecrets' not in dc.stdout" + + - name: Wait for deployment config to take effect before scaling up + pause: seconds=30 + + - name: Scale up registry + command: oc scale --replicas=1 dc/docker-registry + + - name: Delete temporary config file + file: path=/root/config.yml state=absent diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml new file mode 100644 index 000000000..7d1544be8 --- /dev/null +++ b/playbooks/adhoc/uninstall.yml @@ -0,0 +1,145 @@ +# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift +# Enterprise content installed by ansible. 
This includes: +# +# configuration +# containers +# example templates and imagestreams +# images +# RPMs +--- +- hosts: + - OSEv3:children + + sudo: yes + + tasks: + - name: Detecting Operating System + shell: ls /run/ostree-booted + ignore_errors: yes + failed_when: false + register: ostree_output + + - set_fact: + is_atomic: "{{ ostree_output.rc == 0 }}" + + - service: name={{ item }} state=stopped + with_items: + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-openshift-master + - atomic-openshift-master-api + - atomic-openshift-master-controllers + - atomic-openshift-node + - etcd + - openshift-master + - openshift-master-api + - openshift-master-controllers + - openshift-node + - openvswitch + - origin-master + - origin-master-api + - origin-master-controllers + - origin-node + + - yum: name={{ item }} state=absent + when: not is_atomic | bool + with_items: + - atomic-enterprise + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - atomic-openshift + - atomic-openshift-clients + - atomic-openshift-master + - atomic-openshift-node + - atomic-openshift-sdn-ovs + - etcd + - openshift + - openshift-master + - openshift-node + - openshift-sdn + - openshift-sdn-ovs + - openvswitch + - origin + - origin-master + - origin-node + - origin-sdn-ovs + - tuned-profiles-atomic-enterprise-node + - tuned-profiles-atomic-openshift-node + - tuned-profiles-openshift-node + - tuned-profiles-origin-node + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node + changed_when: False + failed_when: False + with_items: + - openshift-enterprise + - atomic-enterprise + - origin + + - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}' + changed_when: False + failed_when: False + register: exited_containers_to_delete + with_items: + - aep3/aep + - openshift3/ose + - openshift/origin + + - shell: "docker rm {{ item.stdout_lines | join(' ') }}" + changed_when: False + failed_when: False + with_items: "{{ exited_containers_to_delete.results }}" + + - shell: docker images | grep {{ item }} | awk '{ print $3 }' + changed_when: False + failed_when: False + register: images_to_delete + with_items: + - registry.access.redhat.com/openshift3 + - registry.access.redhat.com/aep3 + - docker.io/openshift + + - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}" + changed_when: False + failed_when: False + with_items: "{{ images_to_delete.results }}" + + - file: path={{ item }} state=absent + with_items: + - /etc/ansible/facts.d/openshift.fact + - /etc/atomic-enterprise + - /etc/etcd + - /etc/openshift + - /etc/openshift-sdn + - /etc/origin + - /etc/sysconfig/atomic-enterprise-master + - /etc/sysconfig/atomic-enterprise-node + - /etc/sysconfig/atomic-openshift-master + - /etc/sysconfig/atomic-openshift-node + - /etc/sysconfig/openshift-master + - /etc/sysconfig/openshift-node + - /etc/sysconfig/origin-master + - /etc/sysconfig/origin-node + - /root/.kube + - "~{{ ansible_ssh_user }}/.kube" + - /usr/share/openshift/examples + - 
/var/lib/atomic-enterprise
+ - /var/lib/etcd
+ - /var/lib/openshift
+ - /var/lib/origin
diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/adhoc/upgrades/README.md
new file mode 100644
index 000000000..6de8a970f
--- /dev/null
+++ b/playbooks/adhoc/upgrades/README.md
@@ -0,0 +1,21 @@
+# [NOTE]
+This playbook will re-run installation steps overwriting any local
+modifications. You should ensure that your inventory has been updated with any
+modifications you've made after your initial installation. If you find any items
+that cannot be configured via ansible please open an issue at
+https://github.com/openshift/openshift-ansible
+
+# Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Applies latest configuration by re-running the installation playbook
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+# Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml
diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins
new file mode 120000
index 000000000..b0b7a3414
--- /dev/null
+++ b/playbooks/adhoc/upgrades/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins
new file mode 120000
index 000000000..73cafffe5
--- /dev/null
+++ b/playbooks/adhoc/upgrades/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/adhoc/upgrades/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml new file mode 100644 index 000000000..ae1d0127c --- /dev/null +++ b/playbooks/adhoc/upgrades/upgrade.yml @@ -0,0 +1,138 @@ +--- +- name: Upgrade base package on masters + hosts: masters + roles: + - openshift_facts + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade base package + yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest + +- name: Re-Run cluster configuration to apply latest configuration changes + include: ../../common/openshift-cluster/config.yml + vars: + g_etcd_group: "{{ 'etcd' }}" + g_masters_group: "{{ 'masters' }}" + g_nodes_group: "{{ 'nodes' }}" + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_deployment_type: "{{ deployment_type }}" + +- name: Upgrade masters + hosts: masters + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade master packages + yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest + - name: Restart master services + service: name="{{ openshift.common.service_type}}-master" state=restarted + +- name: Upgrade nodes + hosts: nodes + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade node packages + yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest + - name: Restart node services + service: name="{{ openshift.common.service_type }}-node" state=restarted + +- name: Determine new master version + hosts: oo_first_master + tasks: + - name: Determine new version + command: > + rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master + register: _new_version + +- name: Ensure AOS 3.0.2 or Origin 1.0.6 + hosts: oo_first_master + tasks: + fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later + when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') ) + +- name: Update cluster policy + hosts: oo_first_master + tasks: + - name: oadm policy reconcile-cluster-roles --confirm + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --confirm + +- name: Update cluster policy bindings + hosts: oo_first_master + tasks: + - name: oadm policy reconcile-cluster-role-bindings --confirm + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>') + +- name: Upgrade default router + hosts: oo_first_master + vars: + - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}" + - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + tasks: + - name: Check for default router + command: > + {{ oc_cmd }} get -n default dc/router + register: _default_router + failed_when: false + changed_when: 
false + - name: Check for allowHostNetwork and allowHostPorts + when: _default_router.rc == 0 + shell: > + {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork + register: _scc + - name: Grant allowHostNetwork and allowHostPorts + when: + - _default_router.rc == 0 + - "'false' in _scc.stdout" + command: > + {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9 + - name: Update deployment config to 1.0.4/3.0.1 spec + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}' + - name: Switch to hostNetwork=true + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}' + - name: Update router image to current version + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' + +- name: Upgrade default + hosts: oo_first_master + vars: + - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}" + - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + tasks: + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + +- name: Update image streams and templates + hosts: oo_first_master + vars: + openshift_examples_import_command: "update" + openshift_deployment_type: "{{ deployment_type }}" + roles: + - openshift_examples diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml new file mode 100644 index 000000000..1e884240a --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -0,0 +1,58 @@ +--- +- hosts: localhost + gather_facts: no + vars: + g_server: http://localhost:8080/zabbix/api_jsonrpc.php + g_user: '' + g_password: '' + + roles: + - lib_zabbix + + post_tasks: + - name: CLEAN List template for heartbeat + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" + state: list + name: 'Template Heartbeat' + register: templ_heartbeat + + - name: CLEAN List template app zabbix server + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" + state: list + name: 'Template App Zabbix Server' + register: templ_zabbix_server + + - name: CLEAN List template app zabbix server + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" + state: list + name: 'Template App Zabbix Agent' + register: templ_zabbix_agent + + - name: CLEAN List all templates + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" + state: list + register: templates + + - debug: var=templ_heartbeat.results + + - name: Remove templates if heartbeat template is missing + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: 
"{{ g_user }}" + zbx_password: "{{ g_password }}" + name: "{{ item }}" + state: absent + with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" + when: templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins new file mode 120000 index 000000000..b0b7a3414 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml new file mode 100755 index 000000000..0fe65b338 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml @@ -0,0 +1,7 @@ +#!/usr/bin/env ansible-playbook +--- +- include: clean_zabbix.yml + vars: + g_server: http://localhost/zabbix/api_jsonrpc.php + g_user: Admin + g_password: zabbix diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml new file mode 100755 index 000000000..e2b8150c6 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml @@ -0,0 +1,13 @@ +#!/usr/bin/ansible-playbook +--- +- hosts: localhost + gather_facts: no + vars: + g_server: http://localhost/zabbix/api_jsonrpc.php + g_user: Admin + g_password: zabbix + roles: + - role: os_zabbix + ozb_server: "{{ g_server }}" + ozb_user: "{{ g_user }}" + ozb_password: "{{ g_password }}" diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file
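Both oo-clean-zaio.yml and oo-config-zaio.yml are added with mode 0755 and an ansible-playbook shebang, so they can be executed directly against the local all-in-one Zabbix instance; the g_server/g_user/g_password play vars can also be overridden with extra-vars to target another server. A minimal sketch, assuming a reachable Zabbix API endpoint (the non-localhost URL is a placeholder):

  ./oo-config-zaio.yml
  # or point the same roles at another Zabbix instance:
  ansible-playbook playbooks/adhoc/zabbix_setup/oo-config-zaio.yml \
    -e g_server=https://zabbix.example.com/zabbix/api_jsonrpc.php \
    -e g_user=Admin -e g_password=zabbix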