author     Kenny Woodson <kwoodson@redhat.com>    2015-10-29 11:14:51 -0400
committer  Kenny Woodson <kwoodson@redhat.com>    2015-10-29 11:14:51 -0400
commit     9bbaa824da5e1a049cdec1a6523c3841d713386c (patch)
tree       93e80f1577ad0f2f5f8931b493c50cd9aa657c77 /playbooks
parent     15df494fb781dd1509854eeb366e981930b52c22 (diff)
parent     16d1bce0be2f8c3942489630adcb7030aecadc55 (diff)
download   openshift-9bbaa824da5e1a049cdec1a6523c3841d713386c.tar.gz
           openshift-9bbaa824da5e1a049cdec1a6523c3841d713386c.tar.bz2
           openshift-9bbaa824da5e1a049cdec1a6523c3841d713386c.tar.xz
           openshift-9bbaa824da5e1a049cdec1a6523c3841d713386c.zip
Merge pull request #763 from openshift/master
Merge master into prod.
Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 29
-rw-r--r--  playbooks/adhoc/create_pv/create_pv.yaml | 159
-rw-r--r--  playbooks/adhoc/create_pv/pv-template.j2 | 16
-rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup | 2
-rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 142
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 115
-rw-r--r--  playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml | 69
-rw-r--r--  playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py | 41
-rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 206
-rw-r--r--  playbooks/adhoc/noc/create_host.yml | 55
-rw-r--r--  playbooks/adhoc/noc/create_maintenance.yml | 36
-rw-r--r--  playbooks/adhoc/noc/get_zabbix_problems.yml | 2
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.j2 | 20
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.yml | 71
-rw-r--r--  playbooks/adhoc/uninstall.yml | 145
-rw-r--r--  playbooks/adhoc/upgrades/README.md | 21
l---------  playbooks/adhoc/upgrades/filter_plugins | 1
l---------  playbooks/adhoc/upgrades/lookup_plugins | 1
l---------  playbooks/adhoc/upgrades/roles | 1
-rw-r--r--  playbooks/adhoc/upgrades/upgrade.yml | 138
-rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 58
l---------  playbooks/adhoc/zabbix_setup/filter_plugins | 1
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml | 7
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-config-zaio.yml | 13
l---------  playbooks/adhoc/zabbix_setup/roles (renamed from playbooks/aws/openshift-master/roles) | 0
-rw-r--r--  playbooks/aws/ansible-tower/launch.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 36
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 36
-rw-r--r--  playbooks/aws/openshift-cluster/library/ec2_ami_find.py | 2
l---------  playbooks/aws/openshift-cluster/lookup_plugins | 1
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 79
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 42
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 61
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.int.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.prod.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.stage.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 12
-rw-r--r--  playbooks/aws/openshift-master/config.yml | 19
-rw-r--r--  playbooks/aws/openshift-master/launch.yml | 70
-rw-r--r--  playbooks/aws/openshift-master/terminate.yml | 2
-rw-r--r--  playbooks/aws/openshift-node/config.yml | 26
-rw-r--r--  playbooks/aws/openshift-node/launch.yml | 72
-rw-r--r--  playbooks/aws/openshift-node/terminate.yml | 2
-rw-r--r--  playbooks/aws/terminate.yml | 64
-rw-r--r--  playbooks/byo/config.yml | 8
l---------  playbooks/byo/lookup_plugins | 1
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 9
l---------  playbooks/byo/openshift-cluster/filter_plugins (renamed from playbooks/aws/openshift-master/filter_plugins) | 0
l---------  playbooks/byo/openshift-cluster/lookup_plugins | 1
l---------  playbooks/byo/openshift-cluster/roles (renamed from playbooks/aws/openshift-node/roles) | 0
-rw-r--r--  playbooks/byo/openshift-master/config.yml | 15
l---------  playbooks/byo/openshift-master/filter_plugins | 1
l---------  playbooks/byo/openshift-master/roles | 1
-rw-r--r--  playbooks/byo/openshift-node/config.yml | 23
l---------  playbooks/byo/openshift-node/filter_plugins | 1
l---------  playbooks/byo/openshift-node/roles | 1
-rw-r--r--  playbooks/byo/openshift_facts.yml | 2
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 12
-rw-r--r--  playbooks/byo/vagrant.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 70
-rw-r--r--  playbooks/common/openshift-cluster/create_services.yml | 8
l---------  playbooks/common/openshift-cluster/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 5
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 97
l---------  playbooks/common/openshift-etcd/filter_plugins (renamed from playbooks/aws/openshift-node/filter_plugins) | 0
l---------  playbooks/common/openshift-etcd/lookup_plugins | 1
l---------  playbooks/common/openshift-etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 18
-rw-r--r--  playbooks/common/openshift-master/config.yml | 264
l---------  playbooks/common/openshift-master/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-master/service.yml | 4
-rw-r--r--  playbooks/common/openshift-node/config.yml | 57
l---------  playbooks/common/openshift-node/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-node/service.yml | 4
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 41
-rw-r--r--  playbooks/gce/openshift-cluster/join_node.yml | 49
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 47
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 4
l---------  playbooks/gce/openshift-cluster/lookup_plugins | 1
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 31
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 64
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 4
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 8
-rw-r--r--  playbooks/gce/openshift-master/config.yml | 18
l---------  playbooks/gce/openshift-master/filter_plugins | 1
-rw-r--r--  playbooks/gce/openshift-master/launch.yml | 51
l---------  playbooks/gce/openshift-master/roles | 1
-rw-r--r--  playbooks/gce/openshift-master/terminate.yml | 35
-rw-r--r--  playbooks/gce/openshift-node/config.yml | 25
l---------  playbooks/gce/openshift-node/filter_plugins | 1
-rw-r--r--  playbooks/gce/openshift-node/launch.yml | 51
l---------  playbooks/gce/openshift-node/roles | 1
-rw-r--r--  playbooks/gce/openshift-node/terminate.yml | 35
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 35
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 26
l---------  playbooks/libvirt/openshift-cluster/lookup_plugins | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 8
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 17
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 18
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 35
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 370
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yml | 149
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 130
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 125
l---------  playbooks/openstack/openshift-cluster/lookup_plugins | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml | 27
-rw-r--r--  playbooks/openstack/openshift-cluster/tasks/launch_instances.yml | 48
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 40
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 44
117 files changed, 3053 insertions, 1058 deletions
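
The same stat can be reproduced from a local clone of openshift/openshift-ansible; a sketch, assuming both merge parents listed above are present locally:

    # Compare the first parent against the merge commit, limited to playbooks/
    git diff --stat 15df494fb781dd1509854eeb366e981930b52c22 \
        9bbaa824da5e1a049cdec1a6523c3841d713386c -- playbooks/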
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
new file mode 100644
index 000000000..c14d08e87
--- /dev/null
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -0,0 +1,29 @@
+# This deletes *ALL* Docker images, and uninstalls OpenShift and
+# Atomic Enterprise RPMs. It is primarily intended for use
+# with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
+
+- hosts:
+ - OSEv3:children
+
+ sudo: yes
+
+ tasks:
+ - shell: docker ps -a -q | xargs docker stop
+ changed_when: False
+ failed_when: False
+
+ - shell: docker ps -a -q| xargs docker rm
+ changed_when: False
+ failed_when: False
+
+ - shell: docker images -q |xargs docker rmi
+ changed_when: False
+ failed_when: False
+
+ - user: name={{ item }} state=absent remove=yes
+ with_items:
+ - alice
+ - joe
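
The three shell tasks above need failed_when: False because xargs still invokes docker once when the listing is empty, and docker then exits non-zero. A sketch of an equivalent cleanup that avoids the spurious failure, using the same --no-run-if-empty flag the docker_storage_cleanup playbook later in this diff uses:

    # Skip the docker call entirely when there is nothing to act on.
    docker ps -a -q  | xargs --no-run-if-empty docker stop
    docker ps -a -q  | xargs --no-run-if-empty docker rm
    docker images -q | xargs --no-run-if-empty docker rmi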
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
new file mode 100644
index 000000000..4f0ef7a75
--- /dev/null
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -0,0 +1,159 @@
+---
+#example run:
+# ansible-playbook -e "cli_volume_size=1" \
+# -e "cli_device_name=/dev/xvdf" \
+# -e "cli_hosttype=master" \
+# -e "cli_environment=ops" \
+# create_pv.yaml
+# FIXME: we need to change "environment" to "clusterid" as that's what it really is now.
+#
+- name: Create a volume and attach it to master
+ hosts: localhost
+ gather_facts: no
+ vars:
+ cli_volume_type: gp2
+ cli_volume_iops: ''
+ oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
+ intersect(groups['tag_environment_' ~ cli_environment]) |
+ first }}"
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_volume_size
+ - cli_device_name
+ - cli_hosttype
+ - cli_environment
+
+ - name: set oo_name fact
+ set_fact:
+ oo_name: "{{ oo_name }}"
+
+
+ - name: Select a single master to run this on
+ add_host:
+ hostname: "{{ oo_name }}"
+ ansible_ssh_host: "{{ hostvars[oo_name].ec2_public_dns_name }}"
+ groups: oo_master
+
+ - name: Create a volume and attach it
+ ec2_vol:
+ state: present
+ instance: "{{ hostvars[oo_name]['ec2_id'] }}"
+ region: "{{ hostvars[oo_name]['ec2_region'] }}"
+ volume_size: "{{ cli_volume_size }}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: "{{ cli_device_name }}"
+ iops: "{{ cli_volume_iops }}"
+ register: vol
+
+ - debug: var=vol
+
+ - name: tag the vol with a name
+ ec2_tag: region={{ hostvars[oo_name]['ec2_region'] }} resource={{vol.volume_id}}
+ args:
+ tags:
+ Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
+ env: "{{cli_environment}}"
+ register: voltags
+
+ - debug: var=voltags
+
+- name: Configure the drive
+ gather_facts: no
+ hosts: oo_master
+ user: root
+ connection: ssh
+ vars:
+ pv_tmpdir: /tmp/persistentvolumes
+
+ post_tasks:
+ - name: Setting facts for template
+ set_fact:
+ pv_name: "pv-{{cli_volume_size}}-{{ hostvars[hostvars.localhost.oo_name]['ec2_tag_Name'] }}-{{hostvars.localhost.vol.volume_id }}"
+ vol_az: "{{ hostvars[hostvars.localhost.oo_name]['ec2_placement'] }}"
+ vol_id: "{{ hostvars.localhost.vol.volume_id }}"
+ vol_size: "{{ cli_volume_size }}"
+ pv_mntdir: "{{ pv_tmpdir }}/mnt-{{ 1000 | random }}"
+
+ - set_fact:
+ pv_template: "{{ pv_tmpdir }}/{{ pv_name }}.yaml"
+
+ - name: "Mkdir {{ pv_tmpdir }}"
+ file:
+ state: directory
+ path: "{{ pv_tmpdir }}"
+ mode: '0750'
+
+ - name: "Mkdir {{ pv_mntdir }}"
+ file:
+ state: directory
+ path: "{{ pv_mntdir }}"
+ mode: '0750'
+
+ - name: Create pv file from template
+ template:
+ src: ./pv-template.j2
+ dest: "{{ pv_template }}"
+ owner: root
+ mode: '0640'
+
+ - name: mkfs
+ filesystem:
+ dev: "{{ cli_device_name }}"
+ fstype: ext4
+
+ - name: Mount the dev
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ fstype: ext4
+ state: mounted
+
+  - name: chmod g+rwXs and set SELinux context
+ file:
+ path: "{{ pv_mntdir }}"
+ mode: 'g+rwXs'
+ recurse: yes
+ seuser: system_u
+ serole: object_r
+ setype: svirt_sandbox_file_t
+ selevel: s0
+
+ - name: umount
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ state: unmounted
+ fstype: ext4
+
+ - name: remove from fstab
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ state: absent
+ fstype: ext4
+
+ - name: detach drive
+ delegate_to: localhost
+ ec2_vol:
+ region: "{{ hostvars[hostvars.localhost.oo_name].ec2_region }}"
+ id: "{{ hostvars.localhost.vol.volume_id }}"
+ instance: None
+
+ - name: "Remove {{ pv_mntdir }}"
+ file:
+ state: absent
+ path: "{{ pv_mntdir }}"
+
+ # We have to use the shell module because we can't set env vars with the command module.
+ - name: "Place PV into oc"
+ shell: "KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc create -f {{ pv_template | quote }}"
+ register: oc_output
+
+ - debug: var=oc_output
+
+ - fail:
+ msg: "Failed to add {{ pv_template }} to master."
+ when: oc_output.rc != 0
diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2
new file mode 100644
index 000000000..5654ef6c4
--- /dev/null
+++ b/playbooks/adhoc/create_pv/pv-template.j2
@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{ pv_name }}
+ labels:
+ type: ebs
+spec:
+ capacity:
+ storage: {{ vol_size }}Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Recycle
+ awsElasticBlockStore:
+ volumeID: aws://{{ vol_az }}/{{ vol_id }}
+ fsType: ext4
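
For reference, with the facts set by create_pv.yaml above, this template renders to a manifest like the following; the name, availability zone, and volume ID here are hypothetical placeholders:

    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv-1-master-example-vol-0123456789abcdef0
      labels:
        type: ebs
    spec:
      capacity:
        storage: 1Gi
      accessModes:
      - ReadWriteOnce
      persistentVolumeReclaimPolicy: Recycle
      awsElasticBlockStore:
        volumeID: aws://us-east-1a/vol-0123456789abcdef0
        fsType: ext4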
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup
new file mode 100644
index 000000000..059058823
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup
@@ -0,0 +1,2 @@
+DEVS=/dev/xvdb
+VG=docker_vg
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
new file mode 100644
index 000000000..b6a2d2f26
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -0,0 +1,142 @@
+---
+# This playbook converts docker storage from loopback to direct-lvm (the Red Hat recommended way to run docker)
+# in AWS. This adds an additional EBS volume and creates the Volume Group on this EBS volume to use.
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+# export AWS_ACCESS_KEY_ID='XXXXX'
+# export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+# ansible-playbook -e 'cli_tag_name=<tag-name>' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+# Example:
+# ansible-playbook -e 'cli_tag_name=ops-master-12345' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+# Notes:
+# * By default this will do a 30GB volume.
+# * iops are calculated as Disk Size * 30, e.g. (30GB * 30) = 900 iops
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+#
+
+- name: Fix docker to have a provisioned iops drive
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ vars:
+ cli_volume_type: gp2
+ cli_volume_size: 30
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+ - cli_volume_size
+
+ - debug:
+ var: hosts
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if loopback
+ shell: docker info | grep 'Data file:.*loop'
+ register: loop_device_check
+ ignore_errors: yes
+
+ - debug:
+ var: loop_device_check
+
+ - name: fail if we don't detect loopback
+ fail:
+ msg: loopback not detected! Please investigate manually.
+ when: loop_device_check.rc == 1
+
+ - name: stop zagg client monitoring container
+ service:
+ name: oso-rhel7-zagg-client
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop pcp client monitoring container
+ service:
+ name: oso-f22-host-monitoring
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop docker
+ service:
+ name: docker
+ state: stopped
+
+ - name: delete /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: remove /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: check to see if /dev/xvdb exists
+ command: test -e /dev/xvdb
+ register: xvdb_check
+ ignore_errors: yes
+
+ - debug: var=xvdb_check
+
+ - name: fail if /dev/xvdb already exists
+ fail:
+ msg: /dev/xvdb already exists. Please investigate
+ when: xvdb_check.rc == 0
+
+ - name: Create a volume and attach it
+ delegate_to: localhost
+ ec2_vol:
+ state: present
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ volume_size: "{{ cli_volume_size | default(30, True)}}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: /dev/xvdb
+ register: vol
+
+ - debug: var=vol
+
+ - name: tag the vol with a name
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{ vol.volume_id }}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }}"
+ env: "{{ ec2_tag_environment }}"
+ register: voltags
+
+ - name: Wait for volume to attach
+ pause:
+ seconds: 30
+
+ - name: copy the docker-storage-setup config file
+ copy:
+ src: docker-storage-setup
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+
+ - name: docker storage setup
+ command: docker-storage-setup
+ register: setup_output
+
+ - debug: var=setup_output
+
+ - name: start docker
+ command: systemctl start docker.service
+ register: dockerstart
+
+ - debug: var=dockerstart
+
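The loopback detection above hinges on grep's exit code; a condensed sketch of the same gate in plain shell:

    # grep exits 0 when docker's devicemapper "Data file" is a loop device,
    # non-zero otherwise; the playbook fails the run in the latter case.
    if docker info | grep 'Data file:.*loop'; then
        echo "loopback storage detected; conversion can proceed"
    else
        echo "loopback not detected! Please investigate manually." >&2
        exit 1
    fi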
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..72fcd77b3
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,115 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker storage from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+# To run:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+# Example:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+# Notes:
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "{{ cli_host }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_docker_device
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if loopback
+ shell: docker info | grep 'Data file:.*loop'
+ register: loop_device_check
+ ignore_errors: yes
+
+ - debug:
+ var: loop_device_check
+
+ - name: fail if we don't detect loopback
+ fail:
+ msg: loopback not detected! Please investigate manually.
+ when: loop_device_check.rc == 1
+
+ - name: stop zagg client monitoring container
+ service:
+ name: oso-rhel7-zagg-client
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop pcp client monitoring container
+ service:
+ name: oso-f22-host-monitoring
+ state: stopped
+ ignore_errors: yes
+
+ - name: "check to see if {{ cli_docker_device }} exists"
+ command: "test -e {{ cli_docker_device }}"
+ register: docker_dev_check
+ ignore_errors: yes
+
+ - debug: var=docker_dev_check
+
+ - name: "fail if {{ cli_docker_device }} doesn't exist"
+ fail:
+ msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+ when: docker_dev_check.rc != 0
+
+ - name: stop docker
+ service:
+ name: docker
+ state: stopped
+
+ - name: delete /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: remove /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: copy the docker-storage-setup config file
+ copy:
+ content: >
+ DEVS={{ cli_docker_device }}
+ VG=docker_vg
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+
+ - name: docker storage setup
+ command: docker-storage-setup
+ register: setup_output
+
+ - debug: var=setup_output
+
+ - name: extend the vg
+ command: lvextend -l 90%VG /dev/docker_vg/docker-pool
+ register: extend_output
+
+ - debug: var=extend_output
+
+ - name: start docker
+ service:
+ name: docker
+ state: restarted
+
+ - name: docker info
+ command: docker info
+ register: dockerinfo
+
+ - debug: var=dockerinfo
diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
new file mode 100644
index 000000000..a19291a9f
--- /dev/null
+++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
@@ -0,0 +1,69 @@
+---
+# This playbook attempts to cleanup unwanted docker files to help alleviate docker disk space issues.
+#
+# To run:
+#
+# 1. run the playbook:
+#
+# ansible-playbook -e 'cli_tag_name=<tag-name>' docker_storage_cleanup.yml
+#
+# Example:
+#
+# ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' docker_storage_cleanup.yml
+#
+# Notes:
+# * This *should* not interfere with running docker images
+#
+
+- name: Clean up Docker Storage
+ gather_facts: no
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+
+ pre_tasks:
+
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+
+ - name: Ensure docker is running
+ service:
+ name: docker
+ state: started
+ enabled: yes
+
+ - name: Get docker info
+ command: docker info
+ register: docker_info
+
+ - name: Show docker info
+ debug:
+ var: docker_info.stdout_lines
+
+ - name: Remove exited and dead containers
+ shell: "docker ps -a | awk '/Exited|Dead/ {print $1}' | xargs --no-run-if-empty docker rm"
+ ignore_errors: yes
+
+ - name: Remove dangling docker images
+ shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi"
+ ignore_errors: yes
+
+ - name: Remove non-running docker images
+ shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null"
+ ignore_errors: yes
+
+  # Leaving off the '-t' for docker exec: with it, the command fails under ansible due to tty handling.
+ - name: update zabbix docker items
+ command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
+
+ # Get and show docker info again.
+ - name: Get docker info
+ command: docker info
+ register: docker_info
+
+ - name: Show docker info
+ debug:
+ var: docker_info.stdout_lines
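
Each cleanup task above pipes a filtered listing into docker rm/rmi. A dry-run sketch that only prints the container and image IDs the playbook would delete:

    # Preview the targets of the three cleanup tasks without deleting anything.
    docker ps -a | awk '/Exited|Dead/ {print $1}'
    docker images -q -f dangling=true
    docker images | grep -v -e registry.access.redhat.com \
        -e docker-registry.usersys.redhat.com \
        -e docker-registry.ops.rhcloud.com | awk '{print $3}'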
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
new file mode 100644
index 000000000..d0264cde9
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+import pdb
+
+
+class FilterModule(object):
+ ''' Custom ansible filters '''
+
+ @staticmethod
+ def oo_pdb(arg):
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | oo_pdb }}"
+ '''
+ pdb.set_trace()
+ return arg
+
+ @staticmethod
+ def translate_volume_name(volumes, target_volume):
+ '''
+ This filter matches a device string /dev/sdX to /dev/xvdX
+ It will then return the AWS volume ID
+ '''
+ for vol in volumes:
+ translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
+ if target_volume.startswith(translated_name):
+ return vol["id"]
+
+ return None
+
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {
+ "translate_volume_name": self.translate_volume_name,
+ }
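
translate_volume_name expects the volume list returned by the ec2_vol module with state: list; a minimal sketch of the filter's behaviour with hypothetical data:

    # Hypothetical ec2_vol payload; only the fields the filter reads are shown.
    volumes = [
        {"id": "vol-111", "attachment_set": {"device": "/dev/sdb"}},
        {"id": "vol-222", "attachment_set": {"device": "/dev/sdc"}},
    ]
    f = FilterModule()
    # "/dev/sdc" translates to "/dev/xvdc", which is a prefix of "/dev/xvdc1".
    print(f.translate_volume_name(volumes, "/dev/xvdc1"))  # -> vol-222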
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
new file mode 100644
index 000000000..63d473146
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -0,0 +1,206 @@
+---
+# This playbook grows the docker VG on a node by:
+# * add a new volume
+# * add volume to the existing VG.
+# * pv move to the new volume.
+# * remove old volume
+# * detach volume
+# * mark old volume in AWS with "REMOVE ME" tag
+# * grow docker LVM to 90% of the VG
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+# export AWS_ACCESS_KEY_ID='XXXXX'
+# export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+# ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml
+#
+# Example:
+# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
+#
+# Notes:
+# * By default this will do a 55GB GP2 volume. This can be overridden with the "-e 'cli_volume_size=100'" variable
+# * This does a GP2 by default. Support for Provisioned IOPS has not been added
+# * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
+# * This can be done with NO downtime on the host
+# * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool". This is
+# the LV that gets created via the "docker-storage-setup" command
+#
+
+- name: Grow the docker volume group
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ vars:
+ cli_volume_type: gp2
+ cli_volume_size: 55
+# cli_volume_iops: "{{ 30 * cli_volume_size }}"
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+ - cli_volume_size
+
+ - debug:
+ var: hosts
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if Storage Driver (docker info) is devicemapper
+ shell: docker info | grep 'Storage Driver:.*devicemapper'
+ register: device_mapper_check
+ ignore_errors: yes
+
+ - debug:
+ var: device_mapper_check
+
+ - name: fail if we don't detect devicemapper
+ fail:
+ msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+ when: device_mapper_check.rc == 1
+
+  # docker-storage-setup creates an LV named docker-pool. We use that LV
+  # to find the volume group.
+ - name: Attempt to find the Volume Group that docker is using
+ shell: lvs | grep docker-pool | awk '{print $2}'
+ register: docker_vg_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_vg_name
+
+ - name: fail if we don't find a docker volume group
+ fail:
+ msg: Unable to find docker volume group. Please investigate manually.
+ when: docker_vg_name.stdout_lines|length != 1
+
+  # docker-storage-setup creates an LV named docker-pool. We use that LV
+  # to find the physical volume.
+  - name: Attempt to find the Physical Volume that docker is using
+ shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'"
+ register: docker_pv_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_pv_name
+
+ - name: fail if we don't find a docker physical volume
+ fail:
+ msg: Unable to find docker physical volume. Please investigate manually.
+ when: docker_pv_name.stdout_lines|length != 1
+
+
+ - name: get list of volumes from AWS
+ delegate_to: localhost
+ ec2_vol:
+ state: list
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ register: attached_volumes
+
+ - debug: var=attached_volumes
+
+ - name: get volume id of current docker volume
+ set_fact:
+ old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}"
+
+ - debug: var=old_docker_volume_id
+
+ - name: check to see if /dev/xvdc exists
+ command: test -e /dev/xvdc
+ register: xvdc_check
+ ignore_errors: yes
+
+ - debug: var=xvdc_check
+
+ - name: fail if /dev/xvdc already exists
+ fail:
+ msg: /dev/xvdc already exists. Please investigate
+ when: xvdc_check.rc == 0
+
+ - name: Create a volume and attach it
+ delegate_to: localhost
+ ec2_vol:
+ state: present
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ volume_size: "{{ cli_volume_size | default(30, True)}}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: /dev/xvdc
+ register: create_volume
+
+ - debug: var=create_volume
+
+ - name: Fail when problems creating volumes and attaching
+ fail:
+ msg: "Failed to create or attach volume msg: {{ create_volume.msg }}"
+ when: create_volume.msg is defined
+
+ - name: tag the vol with a name
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }}"
+ env: "{{ ec2_tag_environment }}"
+ register: voltags
+
+ - name: check for attached drive
+ command: test -b /dev/xvdc
+ register: attachment_check
+ until: attachment_check.rc == 0
+ retries: 30
+ delay: 2
+
+ - name: partition the new drive and make it lvm
+ command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
+
+  - name: pvcreate /dev/xvdc1
+ command: pvcreate /dev/xvdc1
+
+ - name: Extend the docker volume group
+ command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1
+
+ - name: pvmove onto new volume
+ command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
+ async: 43200
+ poll: 10
+
+ - name: Remove the old docker drive from the volume group
+ command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}"
+
+ - name: Remove the pv from the old drive
+ command: "pvremove {{ docker_pv_name.stdout }}"
+
+ - name: Extend the docker lvm
+ command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool"
+
+ - name: detach old docker volume
+ delegate_to: localhost
+ ec2_vol:
+ region: "{{ ec2_region }}"
+ id: "{{ old_docker_volume_id }}"
+ instance: None
+
+  - name: tag the old vol with a REMOVE ME label
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }} REMOVE ME"
+ register: voltags
+
+ - name: Update the /etc/sysconfig/docker-storage-setup with new device
+ lineinfile:
+ dest: /etc/sysconfig/docker-storage-setup
+ regexp: ^DEVS=
+ line: DEVS=/dev/xvdc
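
Stripped of the AWS bookkeeping, the LVM migration above boils down to the following sequence; a sketch, assuming the new volume attaches as /dev/xvdc and using hypothetical names for the discovered VG and old PV:

    VG=docker_vg        # hypothetical; the playbook discovers this via lvs
    OLD_PV=/dev/xvdb1   # hypothetical; discovered via pvs
    parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
    pvcreate /dev/xvdc1
    vgextend "$VG" /dev/xvdc1
    pvmove "$OLD_PV" /dev/xvdc1   # long-running; the playbook allows up to 12h
    vgreduce "$VG" "$OLD_PV"
    pvremove "$OLD_PV"
    lvextend -l '90%VG' "/dev/$VG/docker-pool"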
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
new file mode 100644
index 000000000..d250e6e69
--- /dev/null
+++ b/playbooks/adhoc/noc/create_host.yml
@@ -0,0 +1,55 @@
+---
+- name: 'Create a host object in zabbix'
+ hosts: localhost
+ gather_facts: no
+ roles:
+ - os_zabbix
+ post_tasks:
+
+ - zbxapi:
+ server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+ zbx_class: Template
+ state: list
+ params:
+ host: ctr_test_kwoodson
+ filter:
+ host:
+ - ctr_kwoodson_test_tmpl
+
+ register: tmpl_results
+
+ - debug: var=tmpl_results
+
+#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
+- name: 'Remove the host object in zabbix'
+ hosts: localhost
+ gather_facts: no
+ roles:
+ - os_zabbix
+ post_tasks:
+
+ - zbxapi:
+ server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+ zbx_class: Host
+ state: absent
+ params:
+ host: ctr_test_kwoodson
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 127.0.0.1
+ dns: ""
+ port: 10050
+ groups:
+ - groupid: 1
+ templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}"
+ output: extend
+ filter:
+ host:
+ - ctr_test_kwoodson
+
+ register: host_results
+
+ - debug: var=host_results
+
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
new file mode 100644
index 000000000..c0ec57ce1
--- /dev/null
+++ b/playbooks/adhoc/noc/create_maintenance.yml
@@ -0,0 +1,36 @@
+---
+#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
+- name: 'Create a maintenance object in zabbix'
+ hosts: localhost
+ gather_facts: no
+ roles:
+ - os_zabbix
+ vars:
+ oo_hostids: ''
+ oo_groupids: ''
+ post_tasks:
+ - assert:
+ that: oo_desc is defined
+
+ - zbxapi:
+ server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+ zbx_class: Maintenance
+ state: present
+ params:
+ name: "{{ oo_name }}"
+ description: "{{ oo_desc }}"
+ active_since: "{{ oo_start }}"
+ active_till: "{{ oo_stop }}"
+ maintenance_type: "0"
+ output: extend
+ hostids: "{{ oo_hostids.split(',') | default([]) }}"
+#groupids: "{{ oo_groupids.split(',') | default([]) }}"
+ timeperiods:
+ - start_time: "{{ oo_start }}"
+ period: "{{ oo_stop }}"
+ selectTimeperiods: extend
+
+ register: maintenance
+
+ - debug: var=maintenance
+
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
index 02bffc1d2..4b94fa228 100644
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ b/playbooks/adhoc/noc/get_zabbix_problems.yml
@@ -11,7 +11,7 @@
- zbxapi:
server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
zbx_class: Trigger
- action: get
+ state: list
params:
only_true: true
output: extend
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
new file mode 100644
index 000000000..acfa89515
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -0,0 +1,20 @@
+version: 0.1
+log:
+ level: debug
+http:
+ addr: :5000
+storage:
+ cache:
+ layerinfo: inmemory
+ s3:
+ accesskey: {{ aws_access_key }}
+ secretkey: {{ aws_secret_key }}
+ region: us-east-1
+ bucket: {{ clusterid }}-docker
+ encrypt: true
+ secure: true
+ v4auth: true
+ rootdirectory: /registry
+middleware:
+ repository:
+ - name: openshift
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
new file mode 100644
index 000000000..4dcef1a42
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -0,0 +1,71 @@
+---
+# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
+# Usage:
+# ansible-playbook s3_registry.yml -e clusterid="mycluster"
+#
+# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
+# The 'clusterid' is the short name of your cluster.
+
+- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
+ remote_user: root
+ gather_facts: False
+
+ vars:
+ aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+ aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+
+ tasks:
+
+ - name: Check for AWS creds
+ fail:
+ msg: "Couldn't find {{ item }} creds in ENV"
+ when: "{{ item }} == ''"
+ with_items:
+ - aws_access_key
+ - aws_secret_key
+
+ - name: Scale down registry
+ command: oc scale --replicas=0 dc/docker-registry
+
+ - name: Create S3 bucket
+ local_action:
+ module: s3 bucket="{{ clusterid }}-docker" mode=create
+
+ - name: Set up registry environment variable
+ command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
+
+ - name: Generate docker registry config
+ template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
+
+ - name: Determine if new secrets are needed
+ command: oc get secrets
+ register: secrets
+
+ - name: Create registry secrets
+ command: oc secrets new dockerregistry /root/config.yml
+ when: "'dockerregistry' not in secrets.stdout"
+
+ - name: Determine if service account contains secrets
+ command: oc describe serviceaccount/registry
+ register: serviceaccount
+
+ - name: Add secrets to registry service account
+ command: oc secrets add serviceaccount/registry secrets/dockerregistry
+ when: "'dockerregistry' not in serviceaccount.stdout"
+
+ - name: Determine if deployment config contains secrets
+ command: oc volume dc/docker-registry --list
+ register: dc
+
+ - name: Add secrets to registry deployment config
+ command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
+ when: "'dockersecrets' not in dc.stdout"
+
+ - name: Wait for deployment config to take effect before scaling up
+ pause: seconds=30
+
+ - name: Scale up registry
+ command: oc scale --replicas=1 dc/docker-registry
+
+ - name: Delete temporary config file
+ file: path=/root/config.yml state=absent
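
Putting the usage comments together, a full invocation looks like the sketch below, assuming the dedicated S3 user's keys are exported under the names the play looks up:

    export S3_ACCESS_KEY_ID='XXXXX'
    export S3_SECRET_ACCESS_KEY='XXXXXX'
    ansible-playbook s3_registry.yml -e clusterid="mycluster"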
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..7d1544be8
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,145 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible. This includes:
+#
+# configuration
+# containers
+# example templates and imagestreams
+# images
+# RPMs
+---
+- hosts:
+ - OSEv3:children
+
+ sudo: yes
+
+ tasks:
+ - name: Detecting Operating System
+ shell: ls /run/ostree-booted
+ ignore_errors: yes
+ failed_when: false
+ register: ostree_output
+
+ - set_fact:
+ is_atomic: "{{ ostree_output.rc == 0 }}"
+
+ - service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-openshift-master
+ - atomic-openshift-master-api
+ - atomic-openshift-master-controllers
+ - atomic-openshift-node
+ - etcd
+ - openshift-master
+ - openshift-master-api
+ - openshift-master-controllers
+ - openshift-node
+ - openvswitch
+ - origin-master
+ - origin-master-api
+ - origin-master-controllers
+ - origin-node
+
+ - yum: name={{ item }} state=absent
+ when: not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - etcd
+ - openshift
+ - openshift-master
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-master
+ - origin-node
+ - origin-sdn-ovs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ changed_when: False
+ failed_when: False
+ with_items:
+ - openshift-enterprise
+ - atomic-enterprise
+ - origin
+
+ - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+ changed_when: False
+ failed_when: False
+ register: exited_containers_to_delete
+ with_items:
+ - aep3/aep
+ - openshift3/ose
+ - openshift/origin
+
+ - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ exited_containers_to_delete.results }}"
+
+ - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry.access.redhat.com/openshift3
+ - registry.access.redhat.com/aep3
+ - docker.io/openshift
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+
+ - file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/atomic-enterprise
+ - /etc/etcd
+ - /etc/openshift
+ - /etc/openshift-sdn
+ - /etc/origin
+ - /etc/sysconfig/atomic-enterprise-master
+ - /etc/sysconfig/atomic-enterprise-node
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/atomic-openshift-node
+ - /etc/sysconfig/openshift-master
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-node
+ - /root/.kube
+ - "~{{ ansible_ssh_user }}/.kube"
+ - /usr/share/openshift/examples
+ - /var/lib/atomic-enterprise
+ - /var/lib/etcd
+ - /var/lib/openshift
+ - /var/lib/origin
diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/adhoc/upgrades/README.md
new file mode 100644
index 000000000..6de8a970f
--- /dev/null
+++ b/playbooks/adhoc/upgrades/README.md
@@ -0,0 +1,21 @@
+# [NOTE]
+This playbook will re-run installation steps overwriting any local
+modifications. You should ensure that your inventory has been updated with any
+modifications you've made after your initial installation. If you find any items
+that cannot be configured via ansible, please open an issue at
+https://github.com/openshift/openshift-ansible
+
+# Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrades and restarts master services
+ * Upgrades and restarts node services
+ * Applies the latest configuration by re-running the installation playbook
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+# Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml
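
The upgrade playbook below targets the standard masters, nodes, and etcd groups; a minimal ~/ansible-inventory sketch with hypothetical hostnames:

    [masters]
    master1.example.com

    [nodes]
    node1.example.com
    node2.example.com

    [etcd]
    master1.example.com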
diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins
new file mode 120000
index 000000000..b0b7a3414
--- /dev/null
+++ b/playbooks/adhoc/upgrades/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins
new file mode 120000
index 000000000..73cafffe5
--- /dev/null
+++ b/playbooks/adhoc/upgrades/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/adhoc/upgrades/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
new file mode 100644
index 000000000..ae1d0127c
--- /dev/null
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -0,0 +1,138 @@
+---
+- name: Upgrade base package on masters
+ hosts: masters
+ roles:
+ - openshift_facts
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade base package
+ yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
+
+- name: Re-Run cluster configuration to apply latest configuration changes
+ include: ../../common/openshift-cluster/config.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Upgrade masters
+ hosts: masters
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade master packages
+ yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
+ - name: Restart master services
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+
+- name: Upgrade nodes
+ hosts: nodes
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade node packages
+ yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
+ - name: Restart node services
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+- name: Determine new master version
+ hosts: oo_first_master
+ tasks:
+ - name: Determine new version
+ command: >
+ rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
+ register: _new_version
+
+- name: Ensure AOS 3.0.2 or Origin 1.0.6
+ hosts: oo_first_master
+ tasks:
+  - fail: msg="This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
+    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
+
+- name: Update cluster policy
+ hosts: oo_first_master
+ tasks:
+ - name: oadm policy reconcile-cluster-roles --confirm
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+
+- name: Update cluster policy bindings
+ hosts: oo_first_master
+ tasks:
+ - name: oadm policy reconcile-cluster-role-bindings --confirm
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+
+- name: Upgrade default router
+ hosts: oo_first_master
+ vars:
+ - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+ - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ tasks:
+ - name: Check for default router
+ command: >
+ {{ oc_cmd }} get -n default dc/router
+ register: _default_router
+ failed_when: false
+ changed_when: false
+ - name: Check for allowHostNetwork and allowHostPorts
+ when: _default_router.rc == 0
+ shell: >
+ {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+ register: _scc
+ - name: Grant allowHostNetwork and allowHostPorts
+ when:
+ - _default_router.rc == 0
+ - "'false' in _scc.stdout"
+ command: >
+ {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+ - name: Update deployment config to 1.0.4/3.0.1 spec
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+ - name: Switch to hostNetwork=true
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+ - name: Update router image to current version
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+
+- name: Upgrade default registry
+ hosts: oo_first_master
+ vars:
+ - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+ - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ tasks:
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+
+- name: Update image streams and templates
+ hosts: oo_first_master
+ vars:
+ openshift_examples_import_command: "update"
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_examples
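
The version gate earlier in this playbook accepts the Origin 1.x stream at 1.0.6+ and the 3.x stream at 3.0.2+. A sketch of the same predicate in plain Python, with sample versions:

    from distutils.version import LooseVersion

    def too_old(v):
        # Mirrors the playbook's when: clause (with the parenthesis balanced).
        return (LooseVersion(v) < LooseVersion('1.0.6')
                or (LooseVersion(v) >= LooseVersion('3.0')
                    and LooseVersion(v) < LooseVersion('3.0.2')))

    for v in ('1.0.5', '1.0.6', '3.0.1', '3.0.2'):
        print(v, too_old(v))  # -> True, False, True, False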
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
new file mode 100644
index 000000000..1e884240a
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
@@ -0,0 +1,58 @@
+---
+- hosts: localhost
+ gather_facts: no
+ vars:
+ g_server: http://localhost:8080/zabbix/api_jsonrpc.php
+ g_user: ''
+ g_password: ''
+
+ roles:
+ - lib_zabbix
+
+ post_tasks:
+ - name: CLEAN List template for heartbeat
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ state: list
+ name: 'Template Heartbeat'
+ register: templ_heartbeat
+
+ - name: CLEAN List template app zabbix server
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ state: list
+ name: 'Template App Zabbix Server'
+ register: templ_zabbix_server
+
+ - name: CLEAN List template app zabbix server
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ state: list
+ name: 'Template App Zabbix Agent'
+ register: templ_zabbix_agent
+
+ - name: CLEAN List all templates
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ state: list
+ register: templates
+
+ - debug: var=templ_heartbeat.results
+
+ - name: Remove templates if heartbeat template is missing
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
+ when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins
new file mode 120000
index 000000000..b0b7a3414
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
new file mode 100755
index 000000000..0fe65b338
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
@@ -0,0 +1,7 @@
+#!/usr/bin/env ansible-playbook
+---
+- include: clean_zabbix.yml
+ vars:
+ g_server: http://localhost/zabbix/api_jsonrpc.php
+ g_user: Admin
+ g_password: zabbix
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
new file mode 100755
index 000000000..e2b8150c6
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
@@ -0,0 +1,13 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+ gather_facts: no
+ vars:
+ g_server: http://localhost/zabbix/api_jsonrpc.php
+ g_user: Admin
+ g_password: zabbix
+ roles:
+ - role: os_zabbix
+ ozb_server: "{{ g_server }}"
+ ozb_user: "{{ g_user }}"
+ ozb_password: "{{ g_password }}"
diff --git a/playbooks/aws/openshift-master/roles b/playbooks/adhoc/zabbix_setup/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/aws/openshift-master/roles
+++ b/playbooks/adhoc/zabbix_setup/roles
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
index 4bcc8b8dc..850238ffb 100644
--- a/playbooks/aws/ansible-tower/launch.yml
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -6,7 +6,7 @@
vars:
inst_region: us-east-1
- rhel7_ami: ami-78756d10
+ rhel7_ami: ami-9101c8fa
user_data_file: user_data.txt
vars_files:
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 7188312ed..a8e3e27bb 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,37 +1,23 @@
---
-- name: Populate oo_masters_to_config host group
- hosts: localhost
+- hosts: localhost
gather_facts: no
vars_files:
- vars.yml
tasks:
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+ - set_fact:
+ g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
- include: ../../common/openshift-cluster/config.yml
vars:
+ g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
+ g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
+ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+ g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
+ openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
- openshift_first_master: "{{ groups.oo_first_master.0 }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 33e1ec25d..786918929 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -11,28 +11,48 @@
msg: Deployment type not supported for aws provider yet
when: deployment_type == 'enterprise'
+ - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ etcd_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
+
- include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
- include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
- include: tasks/launch_instances.yml
vars:
instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
- - set_fact:
- a_master: "{{ master_names[0] }}"
- - add_host: name={{ a_master }} groups=service_master
-
-- include: update.yml
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
-- include: ../../common/openshift-cluster/create_services.yml
- vars:
- g_svc_master: "{{ service_master }}"
+ - add_host:
+ name: "{{ master_names.0 }}"
+ groups: service_master
+ when: master_names is defined and master_names.0 is defined
+- include: update.yml
- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
index 29e594a65..2b1db62d8 100644
--- a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
+++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
@@ -158,7 +158,7 @@ EXAMPLES = '''
# Launch an EC2 instance
- ec2:
image: "{{ ami_search.results[0].ami_id }}"
- instance_type: m3.medium
+ instance_type: m4.medium
key_name: mykey
wait: yes
'''
diff --git a/playbooks/aws/openshift-cluster/lookup_plugins b/playbooks/aws/openshift-cluster/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 060147659..9c699120b 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -5,6 +5,7 @@
env: "{{ cluster }}"
env_host_type: "{{ cluster }}-openshift-{{ type }}"
host_type: "{{ type }}"
+ sub_host_type: "{{ g_sub_host_type }}"
- set_fact:
ec2_region: "{{ lookup('env', 'ec2_region')
@@ -34,6 +35,35 @@
ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
| default(deployment_vars[deployment_type].assign_public_ip, true) }}"
when: ec2_assign_public_ip is not defined
+
+- set_fact:
+ ec2_instance_type: "{{ ec2_master_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ ec2_master_security_groups
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: host_type == "master" and sub_host_type == "default"
+
+- set_fact:
+ ec2_instance_type: "{{ ec2_etcd_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ ec2_etcd_security_groups
+ | default(deployment_vars[deployment_type].security_groups, true)}}"
+ when: host_type == "etcd" and sub_host_type == "default"
+
+- set_fact:
+ ec2_instance_type: "{{ ec2_infra_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ ec2_infra_security_groups
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: host_type == "node" and sub_host_type == "infra"
+
+- set_fact:
+ ec2_instance_type: "{{ ec2_node_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ ec2_node_security_groups
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: host_type == "node" and sub_host_type == "compute"
+
+- set_fact:
+ ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+ | default(deployment_vars[deployment_type].type, true) }}"
+ when: ec2_instance_type is not defined
- set_fact:
ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
| default(deployment_vars[deployment_type].security_groups, true) }}"
@@ -51,8 +81,17 @@
- set_fact:
latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
- user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}"
+ user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
volume_defs:
+ etcd:
+ root:
+ volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
+ device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
+ etcd:
+ volume_size: "{{ lookup('env', 'os_etcd_vol_size') | default(32, true) }}"
+ device_type: "{{ lookup('env', 'os_etcd_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_etcd_vol_iops') | default(500, true) }}"
master:
root:
volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
@@ -60,7 +99,7 @@
iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
node:
root:
- volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+ volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
docker:
@@ -90,6 +129,7 @@
env: "{{ env }}"
host-type: "{{ host_type }}"
env-host-type: "{{ env_host_type }}"
+ sub-host-type: "{{ sub_host_type }}"
volumes: "{{ volumes }}"
register: ec2
@@ -103,7 +143,38 @@
Name: "{{ item.0 }}"
- set_fact:
- instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+ instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }},
+ tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }},
+ tag_sub-host-type_{{ sub_host_type }}"
+
+- set_fact:
+ node_label:
+ region: "{{ec2_region}}"
+ type: "{{sub_host_type}}"
+ when: host_type == "node"
+
+- set_fact:
+ node_label:
+ region: "{{ec2_region}}"
+ type: "{{host_type}}"
+ when: host_type != "node"
+
+- set_fact:
+ logrotate:
+ - name: syslog
+ path: "/var/log/cron
+ \n/var/log/maillog
+ \n/var/log/messages
+ \n/var/log/secure
+ \n/var/log/spooler \n"
+ options:
+ - daily
+ - rotate 7
+ - compress
+ - sharedscripts
+ - missingok
+ scripts:
+ postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
- name: Add new instance groups and variables
add_host:
@@ -114,6 +185,8 @@
groups: "{{ instance_groups }}"
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
+ openshift_node_labels: "{{ node_label }}"
+ logrotate_scripts: "{{ logrotate }}"
with_together:
- instances
- ec2.instances
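
The set_fact cascade in this file resolves an instance type and security groups per host class, falling back to the ec2_instance_type and ec2_security_groups environment variables only when nothing matched. A small sketch, assuming the facts above have already run, to confirm which values won:

- debug:
    msg: "{{ host_type }}/{{ sub_host_type }} -> {{ ec2_instance_type }}, sg: {{ ec2_security_groups | join(', ') }}"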
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
index 7dbc8f552..82c2f4d57 100644
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -1,17 +1,25 @@
#cloud-config
-yum_repos:
- jdetiber-copr:
- name: Copr repo for origin owned by jdetiber
- baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/
- skip_if_unavailable: true
- gpgcheck: true
- gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg
- enabled: true
+{% if type == 'etcd' %}
+cloud_config_modules:
+- disk_setup
+- mounts
-packages:
-- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8
-- docker-storage-setup
+mounts:
+- [ xvdb, /var/lib/etcd, xfs, "defaults" ]
+
+disk_setup:
+ xvdb:
+ table_type: mbr
+ layout: True
+fs_setup:
+- label: etcd_storage
+ filesystem: xfs
+ device: /dev/xvdb
+ partition: auto
+{% endif %}
+
+{% if type == 'node' %}
mounts:
- [ xvdb ]
- [ ephemeral0 ]
@@ -23,7 +31,15 @@ write_files:
path: /etc/sysconfig/docker-storage-setup
owner: root:root
permissions: '0644'
+{% endif %}
+
+{% if deployment_type == 'online' %}
+devices: ['/var'] # Workaround for https://bugs.launchpad.net/bugs/1455436
+disable_root: 0
+growpart:
+ mode: auto
+ devices: ['/var']
runcmd:
-- systemctl daemon-reload
-- systemctl enable lvm2-lvmetad.service docker-storage-setup.service
+- xfs_growfs /var
+{% endif %}
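
For an etcd host, the template now renders a cloud-config that partitions and mounts the dedicated volume before etcd starts. A sketch of the rendered output when type == 'etcd' (and deployment_type != 'online', so the growpart section is absent; the device name assumes the xvdb volume defined in launch_instances.yml):

#cloud-config
cloud_config_modules:
- disk_setup
- mounts

mounts:
- [ xvdb, /var/lib/etcd, xfs, "defaults" ]

disk_setup:
  xvdb:
    table_type: mbr
    layout: True

fs_setup:
- label: etcd_storage
  filesystem: xfs
  device: /dev/xvdb
  partition: auto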
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 617d0d456..77287cad0 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -13,4 +13,63 @@
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
-- include: ../terminate.yml
+- name: Unsubscribe VMs
+ hosts: oo_hosts_to_terminate
+ roles:
+ - role: rhel_unsubscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+
+- name: Terminate instances
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars:
+ host_vars: "{{ hostvars
+ | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
+ tasks:
+ - name: Remove tags from instances
+ ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
+ args:
+ tags:
+ env: "{{ item['ec2_tag_env'] }}"
+ host-type: "{{ item['ec2_tag_host-type'] }}"
+ env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
+ sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}"
+ with_items: host_vars
+ when: "'oo_hosts_to_terminate' in groups"
+
+ - name: Terminate instances
+ ec2:
+ state: absent
+ instance_ids: ["{{ item.ec2_id }}"]
+ region: "{{ item.ec2_region }}"
+ ignore_errors: yes
+ register: ec2_term
+ with_items: host_vars
+ when: "'oo_hosts_to_terminate' in groups"
+
+ # Fail if any of the instances failed to terminate with an error other
+ # than 403 Forbidden
+ - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+ when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+ with_items: ec2_term.results
+
+ - name: Stop instance if termination failed
+ ec2:
+ state: stopped
+ instance_ids: ["{{ item.item.ec2_id }}"]
+ region: "{{ item.item.ec2_region }}"
+ register: ec2_stop
+ when: "'oo_hosts_to_terminate' in groups and item.failed"
+ with_items: ec2_term.results
+
+ - name: Rename stopped instances
+ ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+ args:
+ tags:
+ Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+ with_items: ec2_stop.results
+ when: "'oo_hosts_to_terminate' in groups"
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index 5e7ab4e58..e006aa74a 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -11,7 +11,9 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
+ with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
+ | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
+ | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
index e115615d5..2e2f25ccd 100644
--- a/playbooks/aws/openshift-cluster/vars.online.int.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -1,9 +1,15 @@
---
-ec2_image: ami-78756d10
+ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_instance_type: m3.large
-ec2_security_groups: [ 'int-v3' ]
+ec2_master_instance_type: t2.medium
+ec2_master_security_groups: [ 'integration', 'integration-master' ]
+ec2_infra_instance_type: c4.large
+ec2_infra_security_groups: [ 'integration', 'integration-infra' ]
+ec2_node_instance_type: m4.large
+ec2_node_security_groups: [ 'integration', 'integration-node' ]
+ec2_etcd_instance_type: m4.large
+ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ]
ec2_vpc_subnet: subnet-987c0def
ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
index e115615d5..18a53e12e 100644
--- a/playbooks/aws/openshift-cluster/vars.online.prod.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -1,9 +1,15 @@
---
-ec2_image: ami-78756d10
+ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_instance_type: m3.large
-ec2_security_groups: [ 'int-v3' ]
+ec2_master_instance_type: t2.medium
+ec2_master_security_groups: [ 'production', 'production-master' ]
+ec2_infra_instance_type: c4.large
+ec2_infra_security_groups: [ 'production', 'production-infra' ]
+ec2_node_instance_type: m4.large
+ec2_node_security_groups: [ 'production', 'production-node' ]
+ec2_etcd_instance_type: m4.large
+ec2_etcd_security_groups: [ 'production', 'production-etcd' ]
ec2_vpc_subnet: subnet-987c0def
ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
index e115615d5..1f9ac4252 100644
--- a/playbooks/aws/openshift-cluster/vars.online.stage.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -1,9 +1,15 @@
---
-ec2_image: ami-78756d10
+ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_instance_type: m3.large
-ec2_security_groups: [ 'int-v3' ]
+ec2_master_instance_type: t2.medium
+ec2_master_security_groups: [ 'stage', 'stage-master' ]
+ec2_infra_instance_type: c4.large
+ec2_infra_security_groups: [ 'stage', 'stage-infra' ]
+ec2_node_instance_type: m4.large
+ec2_node_security_groups: [ 'stage', 'stage-node' ]
+ec2_etcd_instance_type: m4.large
+ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ]
ec2_vpc_subnet: subnet-987c0def
ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index 07e453f89..95bc4b3e2 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1,14 +1,14 @@
---
deployment_vars:
origin:
- # fedora, since centos requires marketplace
- image: ami-acd999c4
+ # centos-7, requires marketplace
+ image: ami-96a818fe
image_name:
region: us-east-1
- ssh_user: fedora
+ ssh_user: centos
sudo: yes
keypair: libra
- type: m3.large
+ type: m4.large
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
@@ -20,7 +20,7 @@ deployment_vars:
ssh_user: root
sudo: no
keypair: libra
- type: m3.large
+ type: m4.large
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
@@ -32,7 +32,7 @@ deployment_vars:
ssh_user: ec2-user
sudo: yes
keypair: libra
- type: m3.large
+ type: m4.large
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml
deleted file mode 100644
index 37ab4fbe6..000000000
--- a/playbooks/aws/openshift-master/config.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Populate oo_masters_to_config host group
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: root
- with_items: oo_host_group_exp | default([])
-
-- include: ../../common/openshift-master/config.yml
- vars:
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hostname: "{{ ec2_private_ip_address }}"
- openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml
deleted file mode 100644
index 1cefad492..000000000
--- a/playbooks/aws/openshift-master/launch.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- gather_facts: no
-
-# TODO: modify g_ami based on deployment_type
- vars:
- inst_region: us-east-1
- g_ami: ami-86781fee
- user_data_file: user_data.txt
-
- tasks:
- - name: Launch instances
- ec2:
- state: present
- region: "{{ inst_region }}"
- keypair: libra
- group: ['public']
- instance_type: m3.large
- image: "{{ g_ami }}"
- count: "{{ oo_new_inst_names | length }}"
- user_data: "{{ lookup('file', user_data_file) }}"
- wait: yes
- register: ec2
-
- - name: Add new instances public IPs to the host group
- add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
- with_items: ec2.instances
-
- - name: Add Name and environment tags to instances
- ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
- with_together:
- - oo_new_inst_names
- - ec2.instances
- args:
- tags:
- Name: "{{ item.0 }}"
-
- - name: Add other tags to instances
- ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
- with_items: ec2.instances
- args:
- tags: "{{ oo_new_inst_tags }}"
-
- - name: Add new instances public IPs to oo_masters_to_config
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- groupname: oo_masters_to_config
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- with_together:
- - oo_new_inst_names
- - ec2.instances
-
- - name: Wait for ssh
- wait_for: port=22 host={{ item.dns_name }}
- with_items: ec2.instances
-
- - name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_items: ec2.instances
-
-# Apply the configs, separate so that just the configs can be run by themselves
-- include: config.yml
diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml
deleted file mode 100644
index 07d9961bc..000000000
--- a/playbooks/aws/openshift-master/terminate.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml
deleted file mode 100644
index a993a1e99..000000000
--- a/playbooks/aws/openshift-node/config.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Populate oo_nodes_to_config and oo_first_master host groups
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: root
- with_items: oo_host_group_exp | default([])
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
- groups: oo_first_master
- ansible_ssh_user: root
-
-
-- include: ../../common/openshift-node/config.yml
- vars:
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_first_master: "{{ groups.oo_first_master.0 }}"
- openshift_hostname: "{{ ec2_private_ip_address }}"
- openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml
deleted file mode 100644
index e7d1f7310..000000000
--- a/playbooks/aws/openshift-node/launch.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- gather_facts: no
-
-# TODO: modify g_ami based on deployment_type
- vars:
- inst_region: us-east-1
- g_ami: ami-86781fee
- user_data_file: user_data.txt
-
- tasks:
- - name: Launch instances
- ec2:
- state: present
- region: "{{ inst_region }}"
- keypair: libra
- group: ['public']
- instance_type: m3.large
- image: "{{ g_ami }}"
- count: "{{ oo_new_inst_names | length }}"
- user_data: "{{ lookup('file', user_data_file) }}"
- wait: yes
- register: ec2
-
- - name: Add new instances public IPs to the host group
- add_host:
- hostname: "{{ item.public_ip }}"
- groupname: new_ec2_instances"
- with_items: ec2.instances
-
- - name: Add Name and environment tags to instances
- ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present
- with_together:
- - oo_new_inst_names
- - ec2.instances
- args:
- tags:
- Name: "{{ item.0 }}"
-
- - name: Add other tags to instances
- ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
- with_items: ec2.instances
- args:
- tags: "{{ oo_new_inst_tags }}"
-
- - name: Add new instances public IPs to oo_nodes_to_config
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- groupname: oo_nodes_to_config
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- with_together:
- - oo_new_inst_names
- - ec2.instances
-
- - name: Wait for ssh
- wait_for: port=22 host={{ item.dns_name }}
- with_items: ec2.instances
-
- - name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_items: ec2.instances
-
-# Apply the configs, separate so that just the configs can be run by themselves
-- include: config.yml
diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml
deleted file mode 100644
index 07d9961bc..000000000
--- a/playbooks/aws/openshift-node/terminate.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: ../terminate.yml
diff --git a/playbooks/aws/terminate.yml b/playbooks/aws/terminate.yml
deleted file mode 100644
index e9767b260..000000000
--- a/playbooks/aws/terminate.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: Populate oo_hosts_to_terminate host group
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_hosts_to_terminate
- add_host: name={{ item }} groups=oo_hosts_to_terminate
- with_items: oo_host_group_exp | default([])
-
-- name: Gather dynamic inventory variables for hosts to terminate
- hosts: oo_hosts_to_terminate
- gather_facts: no
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- gather_facts: no
- vars:
- host_vars: "{{ hostvars
- | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
- tasks:
- - name: Remove tags from instances
- ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
- args:
- tags:
- env: "{{ item['ec2_tag_env'] }}"
- host-type: "{{ item['ec2_tag_host-type'] }}"
- env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
- with_items: host_vars
- when: "'oo_hosts_to_terminate' in groups"
-
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ item.ec2_id }}"]
- region: "{{ item.ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: host_vars
- when: "'oo_hosts_to_terminate' in groups"
-
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
- when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
- with_items: ec2_term.results
-
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: item.failed
- with_items: ec2_term.results
- when: "'oo_hosts_to_terminate' in groups"
-
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: ec2_stop.results
- when: "'oo_hosts_to_terminate' in groups"
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
index e059514db..7d03914a2 100644
--- a/playbooks/byo/config.yml
+++ b/playbooks/byo/config.yml
@@ -1,8 +1,2 @@
---
-- name: Run the openshift-master config playbook
- include: openshift-master/config.yml
- when: groups.masters is defined and groups.masters
-
-- name: Run the openshift-node config playbook
- include: openshift-node/config.yml
- when: groups.nodes is defined and groups.nodes and groups.masters is defined and groups.masters
+- include: openshift-cluster/config.yml
diff --git a/playbooks/byo/lookup_plugins b/playbooks/byo/lookup_plugins
new file mode 120000
index 000000000..c528bcd1d
--- /dev/null
+++ b/playbooks/byo/lookup_plugins
@@ -0,0 +1 @@
+../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
new file mode 100644
index 000000000..9e50a4a18
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -0,0 +1,9 @@
+---
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
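
Here the g_* names point at the literal etcd, masters, and nodes groups of a bring-your-own inventory. A hedged sketch of a matching hosts file, with hypothetical hostnames:

# Hypothetical BYO inventory satisfying the group names above
[etcd]
etcd1.example.com

[masters]
master1.example.com

[nodes]
node1.example.com
node2.example.com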
diff --git a/playbooks/aws/openshift-master/filter_plugins b/playbooks/byo/openshift-cluster/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/aws/openshift-master/filter_plugins
+++ b/playbooks/byo/openshift-cluster/filter_plugins
diff --git a/playbooks/byo/openshift-cluster/lookup_plugins b/playbooks/byo/openshift-cluster/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/aws/openshift-node/roles b/playbooks/byo/openshift-cluster/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/aws/openshift-node/roles
+++ b/playbooks/byo/openshift-cluster/roles
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
deleted file mode 100644
index f61d277c6..000000000
--- a/playbooks/byo/openshift-master/config.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Populate oo_masters_to_config host group
- hosts: localhost
- gather_facts: no
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- with_items: groups['masters']
-
-- include: ../../common/openshift-master/config.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: 4
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/byo/openshift-master/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/byo/openshift-master/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
deleted file mode 100644
index f50903061..000000000
--- a/playbooks/byo/openshift-node/config.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate oo_nodes_to_config and oo_first_master host groups
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- with_items: groups.nodes
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ item }}"
- groups: oo_first_master
- with_items: groups.masters.0
-
-
-- include: ../../common/openshift-node/config.yml
- vars:
- openshift_first_master: "{{ groups.masters.0 }}"
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: 4
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-node/filter_plugins b/playbooks/byo/openshift-node/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/byo/openshift-node/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-node/roles b/playbooks/byo/openshift-node/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/byo/openshift-node/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index cd282270f..6d7c12fd4 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,5 +1,5 @@
---
-- name: Gather OpenShift facts
+- name: Gather Cluster facts
hosts: all
gather_facts: no
roles:
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
new file mode 100644
index 000000000..f564905ea
--- /dev/null
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -0,0 +1,12 @@
+---
+- hosts: all
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - role: rhel_subscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+ - openshift_repos
+ - os_update_latest
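
The role guard above runs subscription handling only for enterprise deployments on RHEL, and only while the skip option resolves to 'no' or 'false'. A sketch of opting out via the rhsub_skip fallback (hypothetical; the oo_option lookup, when set, takes precedence in the default chain):

- include: rhel_subscribe.yml
  vars:
    rhsub_skip: 'yes'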
diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml
new file mode 100644
index 000000000..76246e7b0
--- /dev/null
+++ b/playbooks/byo/vagrant.yml
@@ -0,0 +1,4 @@
+---
+- include: rhel_subscribe.yml
+
+- include: config.yml
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 14ffa928f..4c74f96db 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,4 +1,74 @@
---
+- name: Populate config host groups
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: This playbook requires g_etcd_group to be set
+ when: g_etcd_group is not defined
+
+ - fail:
+ msg: This playbook requires g_masters_group to be set
+ when: g_masters_group is not defined
+
+ - fail:
+ msg: This playbook requires g_nodes_group to be set
+ when: g_nodes_group is not defined
+
+ - name: Evaluate oo_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_etcd_group] | default([])
+
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_nodes_group] | default([])
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+ when: g_nodeonmaster is defined and g_nodeonmaster == true
+
+ - name: Evaluate oo_first_etcd
+ add_host:
+ name: "{{ groups[g_etcd_group][0] }}"
+ groups: oo_first_etcd
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups[g_masters_group][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+
+- include: ../openshift-etcd/config.yml
+
- include: ../openshift-master/config.yml
- include: ../openshift-node/config.yml
+ vars:
+ osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
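
A note on the default(omit) idiom used throughout these add_host calls: when g_ssh_user or g_sudo is undefined, the parameter is dropped entirely rather than set to an empty value, so per-host inventory settings still apply. Sketched in isolation with a hypothetical host:

- add_host:
    name: master1.example.com                              # hypothetical
    groups: oo_masters_to_config
    ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"   # omitted when unset
    ansible_sudo: "{{ g_sudo | default(omit) }}"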
diff --git a/playbooks/common/openshift-cluster/create_services.yml b/playbooks/common/openshift-cluster/create_services.yml
deleted file mode 100644
index e70709d19..000000000
--- a/playbooks/common/openshift-cluster/create_services.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Deploy OpenShift Services
- hosts: "{{ g_svc_master }}"
- connection: ssh
- gather_facts: yes
- roles:
- - openshift_registry
- - openshift_router
diff --git a/playbooks/common/openshift-cluster/lookup_plugins b/playbooks/common/openshift-cluster/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-cluster/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml
new file mode 100644
index 000000000..1a6580795
--- /dev/null
+++ b/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml
@@ -0,0 +1,13 @@
+---
+- set_fact: k8s_type="etcd"
+
+- name: Generate etcd instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ register: etcd_names_output
+ with_sequence: count={{ num_etcd }}
+
+- set_fact:
+ etcd_names: "{{ etcd_names_output.results | default([])
+ | oo_collect('ansible_facts')
+ | oo_collect('scratch_name') }}"
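
The register-inside-loop pattern above accumulates one scratch_name per iteration, and the two oo_collect filters flatten the registered results into a plain list of names. With num_etcd=2 and cluster_id=mycluster, the outcome would resemble the following (random suffixes are illustrative):

- debug:
    var: etcd_names
# => ["mycluster-etcd-0a1b2", "mycluster-etcd-3c4d5"]   (hypothetical values)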
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
index 118727273..36d7b7870 100644
--- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
@@ -5,7 +5,9 @@
set_fact:
scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: master_names_output
- with_sequence: start=1 end={{ num_masters }}
+ with_sequence: count={{ num_masters }}
- set_fact:
- master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+ master_names: "{{ master_names_output.results | default([])
+ | oo_collect('ansible_facts')
+ | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
index 162315d46..278942f8b 100644
--- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
@@ -1,11 +1,15 @@
---
-- set_fact: k8s_type="node"
+- set_fact: k8s_type=node
+- set_fact: sub_host_type="{{ type }}"
+- set_fact: number_nodes="{{ count }}"
- name: Generate node instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
register: node_names_output
- with_sequence: start=1 end={{ num_nodes }}
+ with_sequence: count={{ number_nodes }}
- set_fact:
- node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+ node_names: "{{ node_names_output.results | default([])
+ | oo_collect('ansible_facts')
+ | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index e92c6f1ee..190e2d862 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -3,5 +3,10 @@
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
+ - role: rhel_subscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
- openshift_repos
- os_update_latest
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
new file mode 100644
index 000000000..952960652
--- /dev/null
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -0,0 +1,97 @@
+---
+- name: Set etcd facts needed for generating certs
+ hosts: oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+ - name: Check status of etcd certificates
+ stat:
+ path: "{{ item }}"
+ with_items:
+ - /etc/etcd/server.crt
+ - /etc/etcd/peer.crt
+ - /etc/etcd/ca.crt
+ register: g_etcd_server_cert_stat_result
+ - set_fact:
+ etcd_server_certs_missing: "{{ g_etcd_server_cert_stat_result.results | map(attribute='stat.exists')
+ | list | intersect([false])}}"
+ etcd_cert_subdir: etcd-{{ openshift.common.hostname }}
+ etcd_cert_config_dir: /etc/etcd
+ etcd_cert_prefix:
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ sudo: false
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_etcd_mktemp
+ changed_when: False
+
+- name: Configure etcd certificates
+ hosts: oo_first_etcd
+ vars:
+ etcd_generated_certs_dir: /etc/etcd/generated_certs
+ etcd_needing_server_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'])
+ | oo_filter_list(filter_attr='etcd_server_certs_missing') }}"
+ sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
+ roles:
+ - etcd_certificates
+ post_tasks:
+ - name: Create a tarball of the etcd certs
+ command: >
+ tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+ -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+ args:
+ creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ with_items: etcd_needing_server_certs
+ - name: Retrieve the etcd cert tarballs
+ fetch:
+ src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: etcd_needing_server_certs
+
+- name: Configure etcd hosts
+ hosts: oo_etcd_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
+ etcd_url_scheme: https
+ etcd_peer_url_scheme: https
+ etcd_peers_group: oo_etcd_to_config
+ pre_tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ etcd_cert_config_dir }}"
+ state: directory
+ - name: Unarchive the tarball on the etcd host
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_cert_config_dir }}"
+ when: etcd_server_certs_missing
+ roles:
+ - etcd
+ - role: nickhammond.logrotate
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ connection: local
+ sudo: false
+ gather_facts: no
+ tasks:
+ - file: name={{ g_etcd_mktemp.stdout }} state=absent
+ changed_when: False
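
The cert flow above is generate-on-first-etcd, tar, fetch to a localhost tmpdir, then unarchive on each member. A minimal post-run verification sketch, with paths following the etcd_cert_config_dir used in this play:

- name: Verify synced etcd certificates
  hosts: oo_etcd_to_config
  tasks:
  - stat:
      path: "/etc/etcd/{{ item }}"
    with_items:
    - server.crt
    - peer.crt
    - ca.crt
    register: etcd_cert_check
  - fail:
      msg: "missing /etc/etcd/{{ item.item }}"
    when: not item.stat.exists
    with_items: etcd_cert_check.results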
diff --git a/playbooks/aws/openshift-node/filter_plugins b/playbooks/common/openshift-etcd/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/aws/openshift-node/filter_plugins
+++ b/playbooks/common/openshift-etcd/filter_plugins
diff --git a/playbooks/common/openshift-etcd/lookup_plugins b/playbooks/common/openshift-etcd/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-etcd/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/roles b/playbooks/common/openshift-etcd/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-etcd/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
new file mode 100644
index 000000000..0bf69b22f
--- /dev/null
+++ b/playbooks/common/openshift-etcd/service.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate g_service_masters host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail: msg="new_cluster_state is required to be injected in this playbook"
+ when: new_cluster_state is not defined
+
+ - name: Evaluate g_service_etcd
+ add_host: name={{ item }} groups=g_service_etcd
+ with_items: oo_host_group_exp | default([])
+
+- name: Change etcd state on etcd instance(s)
+ hosts: g_service_etcd
+ connection: ssh
+ gather_facts: no
+ tasks:
+ - service: name=etcd state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 052ed14c7..1dec923fc 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,18 +1,280 @@
---
+- name: Set master facts and determine if external etcd certs need to be generated
+ hosts: oo_masters_to_config
+ pre_tasks:
+ - name: Check for RPM generated config marker file .config_managed
+ stat:
+ path: /etc/origin/.config_managed
+ register: rpmgenerated_config
+
+ - name: Remove RPM generated config files if present
+ file:
+ path: "/etc/origin/{{ item }}"
+ state: absent
+ when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+ with_items:
+ - master
+ - node
+ - .config_managed
+
+ - set_fact:
+ openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config']
+ | default([]))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+ - role: master
+ local_facts:
+ api_port: "{{ openshift_master_api_port | default(None) }}"
+ api_url: "{{ openshift_master_api_url | default(None) }}"
+ api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+ public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
+ cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
+ cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
+ console_path: "{{ openshift_master_console_path | default(None) }}"
+ console_port: "{{ openshift_master_console_port | default(None) }}"
+ console_url: "{{ openshift_master_console_url | default(None) }}"
+ console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+ public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ - name: Check status of external etcd certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
+ with_items:
+ - master.etcd-client.crt
+ - master.etcd-ca.crt
+ register: g_external_etcd_cert_stat_result
+ - set_fact:
+ etcd_client_certs_missing: "{{ g_external_etcd_cert_stat_result.results
+ | map(attribute='stat.exists')
+ | list | intersect([false])}}"
+ etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: master.etcd-
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ sudo: false
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_master_mktemp
+ changed_when: False
+
+- name: Configure etcd certificates
+ hosts: oo_first_etcd
+ vars:
+ etcd_generated_certs_dir: /etc/etcd/generated_certs
+ etcd_needing_client_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ roles:
+ - etcd_certificates
+ post_tasks:
+ - name: Create a tarball of the etcd certs
+ command: >
+ tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+ -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+ args:
+ creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ with_items: etcd_needing_client_certs
+ - name: Retrieve the etcd cert tarballs
+ fetch:
+ src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: etcd_needing_client_certs
+
+- name: Copy the external etcd certs to the masters
+ hosts: oo_masters_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift.common.config_base }}/master"
+ state: directory
+ when: etcd_client_certs_missing is defined and etcd_client_certs_missing
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_cert_config_dir }}"
+ when: etcd_client_certs_missing is defined and etcd_client_certs_missing
+ - file:
+ path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ owner: root
+ group: root
+ mode: 0600
+ with_items:
+ - master.etcd-client.crt
+ - master.etcd-client.key
+ - master.etcd-ca.crt
+ when: etcd_client_certs_missing is defined and etcd_client_certs_missing
+
+- name: Determine if master certificates need to be generated
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ openshift_master_certs_no_etcd:
+ - admin.crt
+ - master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
+ - master.server.crt
+ - openshift-master.crt
+ - openshift-registry.crt
+ - openshift-router.crt
+ - etcd.server.crt
+ openshift_master_certs_etcd:
+ - master.etcd-client.crt
+
+ - set_fact:
+ openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
+
+ - name: Check status of master certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
+ with_items: openshift_master_certs
+ register: g_master_cert_stat_result
+ - set_fact:
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
+ | map(attribute='stat.exists')
+ | list ) }}"
+ master_cert_subdir: master-{{ openshift.common.hostname }}
+ master_cert_config_dir: "{{ openshift.common.config_base }}/master"
+
+- name: Configure master certificates
+ hosts: oo_first_master
+ vars:
+ master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
+ masters_needing_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
+ | oo_filter_list(filter_attr='master_certs_missing') }}"
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ roles:
+ - openshift_master_certificates
+ post_tasks:
+ - name: Remove generated etcd client certs when using external etcd
+ file:
+ path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: absent
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+ with_nested:
+ - masters_needing_certs
+ - - master.etcd-client.crt
+ - master.etcd-client.key
+
+ - name: Create a tarball of the master certs
+ command: >
+ tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
+ -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
+ args:
+ creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
+ with_items: masters_needing_certs
+
+ - name: Retrieve the master cert tarball from the master
+ fetch:
+ src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: masters_needing_certs
+
- name: Configure master instances
hosts: oo_masters_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ pre_tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift.common.config_base }}/master"
+ state: directory
+ when: master_certs_missing and 'oo_first_master' not in group_names
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
+ dest: "{{ master_cert_config_dir }}"
+ when: master_certs_missing and 'oo_first_master' not in group_names
roles:
- openshift_master
+ - role: nickhammond.logrotate
- role: fluentd_master
when: openshift.common.use_fluentd | bool
- tasks:
+ post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+- name: Additional master configuration
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}"
+ roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool
+ - openshift_examples
+ - role: openshift_cluster_metrics
+ when: openshift.common.use_cluster_metrics | bool
+
+- name: Enable cockpit
+ hosts: oo_first_master
+ vars:
+ cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+ roles:
+ - role: cockpit
+ when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+ (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+
# Additional instance config for online deployments
- name: Additional instance config
hosts: oo_masters_deployment_type_online
roles:
- pods
- os_env_extras
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ connection: local
+ sudo: false
+ gather_facts: no
+ tasks:
+ - file: name={{ g_master_mktemp.stdout }} state=absent
+ changed_when: False
+
+- name: Configure service accounts
+ hosts: oo_first_master
+
+ vars:
+ accounts: ["router", "registry"]
+
+ roles:
+ - openshift_serviceaccounts
+
+- name: Create services
+ hosts: oo_first_master
+ roles:
+ - role: openshift_router
+ when: openshift.master.infra_nodes is defined
+ #- role: openshift_registry
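
Worth calling out: openshift_master_ha is derived purely from group size, so adding a second host to oo_masters_to_config flips the cluster roles on with no separate switch. A standalone check, sketched:

- hosts: oo_first_master
  gather_facts: no
  tasks:
  - debug:
      msg: "HA mode: {{ groups.oo_masters_to_config | length > 1 }}"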
diff --git a/playbooks/common/openshift-master/lookup_plugins b/playbooks/common/openshift-master/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-master/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
index 5636ad156..27e1e66f9 100644
--- a/playbooks/common/openshift-master/service.yml
+++ b/playbooks/common/openshift-master/service.yml
@@ -10,9 +10,9 @@
add_host: name={{ item }} groups=g_service_masters
with_items: oo_host_group_exp | default([])
-- name: Change openshift-master state on master instance(s)
+- name: Change state on master instance(s)
hosts: g_service_masters
connection: ssh
gather_facts: no
tasks:
- - service: name=openshift-master state="{{ new_cluster_state }}"
+ - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 9e642f3d3..a14ca8e11 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -18,26 +18,26 @@
deployment_type: "{{ openshift_deployment_type }}"
- role: node
local_facts:
- resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
labels: "{{ openshift_node_labels | default(None) }}"
annotations: "{{ openshift_node_annotations | default(None) }}"
+ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- name: Check status of node certificates
stat:
- path: "{{ item }}"
+ path: "{{ openshift.common.config_base }}/node/{{ item }}"
with_items:
- - "/etc/openshift/node/node.key"
- - "/etc/openshift/node/node.kubeconfig"
- - "/etc/openshift/node/ca.crt"
- - "/etc/openshift/node/server.key"
+ - "system:node:{{ openshift.common.hostname }}.crt"
+ - "system:node:{{ openshift.common.hostname }}.key"
+ - "system:node:{{ openshift.common.hostname }}.kubeconfig"
+ - ca.crt
+ - server.key
+ - server.crt
register: stat_result
- set_fact:
certs_missing: "{{ stat_result.results | map(attribute='stat.exists')
| list | intersect([false])}}"
node_subdir: node-{{ openshift.common.hostname }}
- config_dir: /etc/openshift/generated-configs/node-{{ openshift.common.hostname }}
- node_cert_dir: /etc/openshift/node
+ config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
+ node_cert_dir: "{{ openshift.common.config_base }}/node"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -50,17 +50,16 @@
register: mktemp
changed_when: False
-- name: Register nodes
+- name: Create node certificates
hosts: oo_first_master
vars:
nodes_needing_certs: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_select_keys(groups['oo_nodes_to_config']
+ | default([]))
| oo_filter_list(filter_attr='certs_missing') }}"
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
roles:
- - openshift_register_nodes
+ - openshift_node_certificates
post_tasks:
- name: Create a tarball of the node config directories
command: >
@@ -84,16 +83,16 @@
hosts: oo_nodes_to_config
vars:
sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- openshift_node_master_api_url: "{{ hostvars[openshift_first_master].openshift.master.api_url }}"
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
path: "{{ node_cert_dir }}"
state: directory
- # TODO: notify restart openshift-node
+ # TODO: notify restart node
# possibly test service started time against certificate/config file
- # timestamps in openshift-node to trigger notify
+ # timestamps in node to trigger notify
- name: Unarchive the tarball on the node
unarchive:
src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
@@ -101,6 +100,7 @@
when: certs_missing
roles:
- openshift_node
+ - role: nickhammond.logrotate
- role: fluentd_node
when: openshift.common.use_fluentd | bool
tasks:
@@ -108,15 +108,6 @@
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
-- name: Delete the temporary directory on the master
- hosts: oo_first_master
- gather_facts: no
- vars:
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- tasks:
- - file: name={{ sync_tmpdir }} state=absent
- changed_when: False
-
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
@@ -133,3 +124,15 @@
roles:
- os_env_extras
- os_env_extras_node
+
+- name: Set schedulability
+ hosts: oo_first_master
+ vars:
+ openshift_nodes: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('openshift.common.hostname') }}"
+ openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ pre_tasks:
+
+ roles:
+ - openshift_manage_node
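
The new schedulability play collapses hostvars into a plain hostname list via oo_select_keys and oo_collect before handing it to openshift_manage_node. A sketch of inspecting that list from the first master, assuming the repo's filter plugins are on the path:

- debug:
    msg: "{{ hostvars
             | oo_select_keys(groups['oo_nodes_to_config'])
             | oo_collect('openshift.common.hostname') }}"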
diff --git a/playbooks/common/openshift-node/lookup_plugins b/playbooks/common/openshift-node/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-node/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
index f76df089f..5cf83e186 100644
--- a/playbooks/common/openshift-node/service.yml
+++ b/playbooks/common/openshift-node/service.yml
@@ -10,9 +10,9 @@
add_host: name={{ item }} groups=g_service_nodes
with_items: oo_host_group_exp | default([])
-- name: Change openshift-node state on node instance(s)
+- name: Change state on node instance(s)
hosts: g_service_nodes
connection: ssh
gather_facts: no
tasks:
- - service: name=openshift-node state="{{ new_cluster_state }}"
+ - service: name={{ openshift.common.service_type }}-node state="{{ new_cluster_state }}"
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 8c320dbd2..6ca4f7395 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -1,38 +1,29 @@
---
# TODO: fix firewall related bug with GCE and origin, since GCE is overriding
# /etc/sysconfig/iptables
-- name: Populate oo_masters_to_config host group
- hosts: localhost
+
+- hosts: localhost
gather_facts: no
vars_files:
- vars.yml
tasks:
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+ - set_fact:
+ g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+ use_sdn: "{{ do_we_use_openshift_sdn }}"
+ sdn_plugin: "{{ sdn_network_plugin }}"
- include: ../../common/openshift-cluster/config.yml
vars:
+ g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
+ g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
+ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+ g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
+ openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
- openshift_first_master: "{{ groups.oo_first_master.0 }}"
openshift_hostname: "{{ gce_private_ip }}"
+ openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn }}"
+ os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}"
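
This localhost relay idiom recurs for the libvirt and OpenStack providers below: values that can only be resolved from vars.yml on localhost are stashed with set_fact, then read back through hostvars.localhost when parameterizing the common include, whose plays run on other hosts. Stripped to its skeleton (the ssh user value is illustrative):

    ---
    - hosts: localhost
      gather_facts: no
      tasks:
        - set_fact:
            g_ssh_user_tmp: cloud-user    # assumed example value

    - include: ../../common/openshift-cluster/config.yml
      vars:
        # Any play in the include can resolve this via the localhost hostvars.
        g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
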
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
new file mode 100644
index 000000000..0dfa3e9d7
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -0,0 +1,49 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ node_ip }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ node_ip }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_nodes_to_config
+
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_first_master
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+#- include: config.yml
+- include: ../../common/openshift-node/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ansible_default_ipv4.address }}"
+ openshift_use_openshift_sdn: true
+  openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }}"
+ os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+ osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 35737f03d..c22b897d5 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -15,30 +15,47 @@
instances: "{{ master_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
- include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
- include: tasks/launch_instances.yml
vars:
instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
- - set_fact:
- a_master: "{{ master_names[0] }}"
- - add_host: name={{ a_master }} groups=service_master
-
-- include: update.yml
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
-- name: Deploy OpenShift Services
- hosts: service_master
- connection: ssh
- gather_facts: yes
- roles:
- - openshift_registry
- - openshift_router
+ - add_host:
+ name: "{{ master_names.0 }}"
+ groups: service_master
+ when: master_names is defined and master_names.0 is defined
-- include: ../../common/openshift-cluster/create_services.yml
- vars:
- g_svc_master: "{{ service_master }}"
+- include: update.yml
+#
+#- name: Deploy OpenShift Services
+# hosts: service_master
+# connection: ssh
+# gather_facts: yes
+# roles:
+# - openshift_registry
+# - openshift_router
+#
+#- include: ../../common/openshift-cluster/create_services.yml
+# vars:
+# g_svc_master: "{{ service_master }}"
- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 5ba0f5a48..53b2b9a5e 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -14,11 +14,11 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
- name: List instance(s)
hosts: oo_list_hosts
gather_facts: no
tasks:
- debug:
- msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
+ msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/lookup_plugins b/playbooks/gce/openshift-cluster/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 9a9848f05..c428cb465 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -10,13 +10,33 @@
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ network: "{{ lookup('env', 'network') }}"
+# unsupported in ansible 1.9.x
+ #service_account_permissions: "datastore,logging-write"
tags:
- created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
- env-{{ cluster }}
- host-type-{{ type }}
+ - sub-host-type-{{ g_sub_host_type }}
- env-host-type-{{ cluster }}-openshift-{{ type }}
+  when: instances | length > 0
register: gce
+- set_fact:
+ node_label:
+ # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+ region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+ type: "{{ g_sub_host_type }}"
+  when: instances | length > 0 and type == "node"
+
+- set_fact:
+ node_label:
+ # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+ region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+ type: "{{ type }}"
+  when: instances | length > 0 and type != "node"
+
- name: Add new instances to groups and set variables needed
add_host:
hostname: "{{ item.name }}"
@@ -26,16 +46,17 @@
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
- with_items: gce.instance_data
+ openshift_node_labels: "{{ node_label }}"
+ with_items: gce.instance_data | default([], true)
- name: Wait for ssh
wait_for: port=22 host={{ item.public_ip }}
- with_items: gce.instance_data
+ with_items: gce.instance_data | default([], true)
- name: Wait for user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
register: result
until: result.rc == 0
- retries: 20
- delay: 10
- with_items: gce.instance_data
+ retries: 30
+ delay: 5
+ with_items: gce.instance_data | default([], true)
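
The regex_replace above recovers a GCE region by trimming the zone's trailing letter, so a zone of us-central1-a yields the region us-central1. In isolation (the quadruple backslash in the diff is double-quoted-YAML escaping; one level less is needed here):

    ---
    - hosts: localhost
      gather_facts: no
      vars:
        zone: us-central1-a             # illustrative zone
      tasks:
        - debug:
            msg: '{{ zone | regex_replace("^(.*)-.*$", "\\1") }}'   # -> us-central1
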
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index abe6a4c95..e20e0a8bc 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -1,34 +1,58 @@
---
- name: Terminate instance(s)
hosts: localhost
+ connection: local
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+ - set_fact: scratch_group=tag_env-{{ cluster_id }}
- add_host:
name: "{{ item }}"
- groups: oo_nodes_to_terminate
+ groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
- - add_host:
- name: "{{ item }}"
- groups: oo_masters_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+- name: Unsubscribe VMs
+ hosts: oo_hosts_to_terminate
+ vars_files:
+ - vars.yml
+ roles:
+ - role: rhel_unsubscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+
+- name: Terminate instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
-- include: ../openshift-node/terminate.yml
- vars:
- gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+ - name: Terminate instances that were previously launched
+ local_action:
+ module: gce
+ state: 'absent'
+ name: "{{ item }}"
+ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ with_items: groups['oo_hosts_to_terminate'] | default([], true)
+ when: item is defined
-- include: ../openshift-master/terminate.yml
- vars:
- gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#- include: ../openshift-node/terminate.yml
+# vars:
+# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#
+#- include: ../openshift-master/terminate.yml
+# vars:
+# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 9ebf39a13..8096aa654 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -11,7 +11,9 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+ with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]))
+ | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]))
+ | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([]))
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
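
Defaulting each operand before taking the union is what lets clusters that lack a host type (for example, no standalone etcd hosts) pass through this task instead of failing on an undefined group. The shape of the expression, with placeholder group names:

    ---
    - hosts: localhost
      gather_facts: no
      tasks:
        - debug:
            msg: "{{ (groups['masters'] | default([]))
                     | union(groups['nodes'] | default([]))
                     | union(groups['etcd'] | default([])) }}"
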
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index ae33083b9..6de007807 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1,8 +1,11 @@
---
+do_we_use_openshift_sdn: true
+sdn_network_plugin: redhat/openshift-ovs-subnet
+# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
deployment_vars:
origin:
- image: centos-7
- ssh_user:
+ image: preinstalled-slave-50g-v5
+ ssh_user: root
sudo: yes
online:
image: libra-rhel7
@@ -12,4 +15,3 @@ deployment_vars:
image: rhel-7
ssh_user:
sudo: yes
-
diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml
deleted file mode 100644
index af6000bc8..000000000
--- a/playbooks/gce/openshift-master/config.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Populate oo_masters_to_config host group
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: root
- with_items: oo_host_group_exp | default([])
-
-- include: ../../common/openshift-master/config.yml
- vars:
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-master/filter_plugins b/playbooks/gce/openshift-master/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/gce/openshift-master/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml
deleted file mode 100644
index ef10b6cf0..000000000
--- a/playbooks/gce/openshift-master/launch.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
-# the gce task to use the disk_auto_delete parameter to avoid having to delete
-# the disk as a separate step on termination
-
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- gather_facts: no
-
-# TODO: modify image based on deployment_type
- vars:
- inst_names: "{{ oo_new_inst_names }}"
- machine_type: n1-standard-1
- image: libra-rhel7
-
- tasks:
- - name: Launch instances
- gce:
- instance_names: "{{ inst_names }}"
- machine_type: "{{ machine_type }}"
- image: "{{ image }}"
- service_account_email: "{{ gce_service_account_email }}"
- pem_file: "{{ gce_pem_file }}"
- project_id: "{{ gce_project_id }}"
- tags: "{{ oo_new_inst_tags }}"
- register: gce
-
- - name: Add new instances public IPs to oo_masters_to_config
- add_host:
- hostname: "{{ item.name }}"
- ansible_ssh_host: "{{ item.public_ip }}"
- groupname: oo_masters_to_config
- gce_private_ip: "{{ item.private_ip }}"
- with_items: gce.instance_data
-
- - name: Wait for ssh
- wait_for: port=22 host={{ item.public_ip }}
- with_items: gce.instance_data
-
- - name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_items: gce.instance_data
-
-
-# Apply the configs, separate so that just the configs can be run by themselves
-- include: config.yml
diff --git a/playbooks/gce/openshift-master/roles b/playbooks/gce/openshift-master/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/gce/openshift-master/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml
deleted file mode 100644
index 452ac5199..000000000
--- a/playbooks/gce/openshift-master/terminate.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Populate oo_masters_to_terminate host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_masters_to_terminate
- add_host: name={{ item }} groups=oo_masters_to_terminate
- with_items: oo_host_group_exp | default([])
-
-- name: Terminate master instances
- hosts: localhost
- connection: local
- gather_facts: no
- tasks:
- - name: Terminate master instances
- gce:
- service_account_email: "{{ gce_service_account_email }}"
- pem_file: "{{ gce_pem_file }}"
- project_id: "{{ gce_project_id }}"
- state: 'absent'
- instance_names: "{{ groups['oo_masters_to_terminate'] }}"
- disks: "{{ groups['oo_masters_to_terminate'] }}"
- register: gce
- when: "'oo_masters_to_terminate' in groups"
-
- - name: Remove disks of instances
- gce_pd:
- service_account_email: "{{ gce_service_account_email }}"
- pem_file: "{{ gce_pem_file }}"
- project_id: "{{ gce_project_id }}"
- name: "{{ item }}"
- zone: "{{ gce.zone }}"
- state: absent
- with_items: gce.instance_names
- when: "'oo_masters_to_terminate' in groups"
diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml
deleted file mode 100644
index 54b0da2ca..000000000
--- a/playbooks/gce/openshift-node/config.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Populate oo_nodes_to_config and oo_first_master host groups
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: root
- with_items: oo_host_group_exp | default([])
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
- groups: oo_first_master
- ansible_ssh_user: root
-
-
-- include: ../../common/openshift-node/config.yml
- vars:
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_first_master: "{{ groups.oo_first_master.0 }}"
- openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-node/filter_plugins b/playbooks/gce/openshift-node/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/gce/openshift-node/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml
deleted file mode 100644
index 086ba58bc..000000000
--- a/playbooks/gce/openshift-node/launch.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
-# the gce task to use the disk_auto_delete parameter to avoid having to delete
-# the disk as a separate step on termination
-
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- gather_facts: no
-
-# TODO: modify image based on deployment_type
- vars:
- inst_names: "{{ oo_new_inst_names }}"
- machine_type: n1-standard-1
- image: libra-rhel7
-
- tasks:
- - name: Launch instances
- gce:
- instance_names: "{{ inst_names }}"
- machine_type: "{{ machine_type }}"
- image: "{{ image }}"
- service_account_email: "{{ gce_service_account_email }}"
- pem_file: "{{ gce_pem_file }}"
- project_id: "{{ gce_project_id }}"
- tags: "{{ oo_new_inst_tags }}"
- register: gce
-
- - name: Add new instances public IPs to oo_nodes_to_config
- add_host:
- hostname: "{{ item.name }}"
- ansible_ssh_host: "{{ item.public_ip }}"
- groupname: oo_nodes_to_config
- gce_private_ip: "{{ item.private_ip }}"
- with_items: gce.instance_data
-
- - name: Wait for ssh
- wait_for: port=22 host={{ item.public_ip }}
- with_items: gce.instance_data
-
- - name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_items: gce.instance_data
-
-
-# Apply the configs, separate so that just the configs can be run by themselves
-- include: config.yml
diff --git a/playbooks/gce/openshift-node/roles b/playbooks/gce/openshift-node/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/gce/openshift-node/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml
deleted file mode 100644
index 357e0c295..000000000
--- a/playbooks/gce/openshift-node/terminate.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Populate oo_nodes_to_terminate host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_nodes_to_terminate
- add_host: name={{ item }} groups=oo_nodes_to_terminate
- with_items: oo_host_group_exp | default([])
-
-- name: Terminate node instances
- hosts: localhost
- connection: local
- gather_facts: no
- tasks:
- - name: Terminate node instances
- gce:
- service_account_email: "{{ gce_service_account_email }}"
- pem_file: "{{ gce_pem_file }}"
- project_id: "{{ gce_project_id }}"
- state: 'absent'
- instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
- disks: "{{ groups['oo_nodes_to_terminate'] }}"
- register: gce
- when: "'oo_nodes_to_terminate' in groups"
-
- - name: Remove disks of instances
- gce_pd:
- service_account_email: "{{ gce_service_account_email }}"
- pem_file: "{{ gce_pem_file }}"
- project_id: "{{ gce_project_id }}"
- name: "{{ item }}"
- zone: "{{ gce.zone }}"
- state: absent
- with_items: gce.instance_names
- when: "'oo_nodes_to_terminate' in groups"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 75e2005a2..c208eee81 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -3,37 +3,22 @@
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
-- name: Populate oo_masters_to_config host group
- hosts: localhost
+- hosts: localhost
gather_facts: no
vars_files:
- vars.yml
tasks:
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: oo_masters_to_config
- with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: oo_nodes_to_config
- with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: oo_first_master
- when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+ - set_fact:
+ g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
- include: ../../common/openshift-cluster/config.yml
vars:
+ g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
+ g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
+ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+ g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
+ openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
- openshift_first_master: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index a7ddc1e7e..d3e768de5 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -13,23 +13,47 @@
image_name: "{{ deployment_vars[deployment_type].image.name }}"
tasks:
- fail: msg="Deployment type not supported for libvirt provider yet"
- when: deployment_type in ['online', 'enterprise']
+ when: deployment_type == 'online'
- include: tasks/configure_libvirt.yml
+ - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ etcd_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
+
- include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
- include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
- include: tasks/launch_instances.yml
vars:
instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
- include: update.yml
diff --git a/playbooks/libvirt/openshift-cluster/lookup_plugins b/playbooks/libvirt/openshift-cluster/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 8291192ab..4b91c6da8 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -14,6 +14,7 @@
url: '{{ image_url }}'
sha256sum: '{{ image_sha256 }}'
dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+ when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
- name: Create the cloud-init config drive path
file:
@@ -62,8 +63,9 @@
shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}'''
register: nb_allocated_ips
until: nb_allocated_ips.stdout == '{{ instances | length }}'
- retries: 30
- delay: 1
+ retries: 60
+ delay: 3
+ when: instances | length != 0
- name: Collect IP addresses of the VMs
shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
@@ -71,7 +73,7 @@
with_items: instances
- set_fact:
- ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+ ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"
- name: Add new instances
add_host:
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
index 86dcd62bb..050bc7ab9 100644
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -8,7 +8,7 @@
<!-- TODO: query for first available virbr interface available -->
<bridge name='virbr3' stp='on' delay='0'/>
<!-- TODO: make overridable -->
- <domain name='example.com'/>
+ <domain name='example.com' localOnly='yes' />
<dns>
<!-- TODO: automatically add host entries -->
</dns>
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index 77b788109..eacae7c7e 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -19,5 +19,5 @@ system_info:
ssh_authorized_keys:
- {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-bootcmd:
+runcmd:
- NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
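
Moving the snippet from bootcmd to runcmd changes its cloud-init timing: bootcmd runs early and on every boot, while runcmd runs once, late in the first boot, after networking is up, which suits a one-time DHCP_HOSTNAME edit followed by a network restart. Simplified shape of the resulting user-data (hostname is illustrative):

    #cloud-config
    runcmd:
      - echo 'DHCP_HOSTNAME="vm1.example.com"' >> /etc/sysconfig/network-scripts/ifcfg-eth0
      - service network restart
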
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index b173a09dd..8f00812a9 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -15,6 +15,23 @@
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[cluster_group] | default([])
+- name: Unsubscribe VMs
+ hosts: oo_hosts_to_terminate
+ vars_files:
+ - vars.yml
+ roles:
+ - role: rhel_unsubscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+
+- name: Terminate instance(s)
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
- name: Destroy VMs
virt:
name: '{{ item[0] }}'
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 57e36db9e..d09832c16 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -11,7 +11,9 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+ with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]))
+ | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]))
+ | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([]))
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index 65d954fee..c77a0797e 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -7,9 +7,12 @@ libvirt_uri: 'qemu:///system'
deployment_vars:
origin:
image:
- url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
- name: CentOS-7-x86_64-GenericCloud.qcow2
- sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
+ sha256: "{{ lookup('oo_option', 'image_sha256') |
+ default('e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab', True) }}"
ssh_user: openshift
sudo: yes
online:
@@ -21,9 +24,12 @@ deployment_vars:
sudo: no
enterprise:
image:
- url:
- name:
- sha256:
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('https://access.cdn.redhat.com//content/origin/files/sha256/ff/ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3/rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
+ sha256: "{{ lookup('oo_option', 'image_sha256') |
+ default('ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3', True) }}"
ssh_user: openshift
sudo: yes
# origin:
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index abadaf5ca..a5ee2d6a5 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,35 +1,20 @@
-- name: Populate oo_masters_to_config host group
- hosts: localhost
+- hosts: localhost
gather_facts: no
vars_files:
- vars.yml
tasks:
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: oo_masters_to_config
- with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: oo_nodes_to_config
- with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: oo_first_master
- when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+ - set_fact:
+ g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
- include: ../../common/openshift-cluster/config.yml
vars:
+ g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
+ g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
+ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+ g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
+ openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
- openshift_first_master: "{{ groups.oo_first_master.0 }}"
openshift_hostname: "{{ ansible_default_ipv4.address }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
new file mode 100644
index 000000000..40e4ab22c
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -0,0 +1,370 @@
+heat_template_version: 2014-10-16
+
+description: OpenShift cluster
+
+parameters:
+
+ cluster_id:
+ type: string
+ label: Cluster ID
+ description: Identifier of the cluster
+
+ cidr:
+ type: string
+ label: CIDR
+ description: CIDR of the network of the cluster
+
+ dns_nameservers:
+ type: comma_delimited_list
+ label: DNS nameservers list
+ description: List of DNS nameservers
+
+ external_net:
+ type: string
+ label: External network
+ description: Name of the external network
+ default: external
+
+ floating_ip_pool:
+ type: string
+ label: Floating IP pool
+ description: Floating IP pools
+ default: external
+
+ ssh_public_key:
+ type: string
+ label: SSH public key
+ description: SSH public key
+ hidden: true
+
+ ssh_incoming:
+ type: string
+ label: Source of ssh connections
+ description: Source of legitimate ssh connections
+ default: 0.0.0.0/0
+
+ num_masters:
+ type: number
+ label: Number of masters
+ description: Number of masters
+
+ num_nodes:
+ type: number
+ label: Number of compute nodes
+ description: Number of compute nodes
+
+ num_infra:
+ type: number
+ label: Number of infrastructure nodes
+ description: Number of infrastructure nodes
+
+ master_image:
+ type: string
+ label: Master image
+ description: Name of the image for the master servers
+
+ node_image:
+ type: string
+ label: Node image
+ description: Name of the image for the compute node servers
+
+ infra_image:
+ type: string
+ label: Infra image
+ description: Name of the image for the infra node servers
+
+ master_flavor:
+ type: string
+ label: Master flavor
+ description: Flavor of the master servers
+
+ node_flavor:
+ type: string
+ label: Node flavor
+ description: Flavor of the compute node servers
+
+ infra_flavor:
+ type: string
+ label: Infra flavor
+ description: Flavor of the infra node servers
+
+outputs:
+
+ master_names:
+ description: Name of the masters
+ value: { get_attr: [ masters, name ] }
+
+ master_ips:
+ description: IPs of the masters
+ value: { get_attr: [ masters, private_ip ] }
+
+ master_floating_ips:
+ description: Floating IPs of the masters
+ value: { get_attr: [ masters, floating_ip ] }
+
+ node_names:
+ description: Name of the nodes
+ value: { get_attr: [ compute_nodes, name ] }
+
+ node_ips:
+ description: IPs of the nodes
+ value: { get_attr: [ compute_nodes, private_ip ] }
+
+ node_floating_ips:
+ description: Floating IPs of the nodes
+ value: { get_attr: [ compute_nodes, floating_ip ] }
+
+ infra_names:
+    description: Name of the infra nodes
+ value: { get_attr: [ infra_nodes, name ] }
+
+ infra_ips:
+    description: IPs of the infra nodes
+ value: { get_attr: [ infra_nodes, private_ip ] }
+
+ infra_floating_ips:
+    description: Floating IPs of the infra nodes
+ value: { get_attr: [ infra_nodes, floating_ip ] }
+
+resources:
+
+ net:
+ type: OS::Neutron::Net
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+
+ subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-subnet
+ params:
+ cluster_id: { get_param: cluster_id }
+ network: { get_resource: net }
+ cidr: { get_param: cidr }
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-router
+ params:
+ cluster_id: { get_param: cluster_id }
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: subnet }
+
+ keypair:
+ type: OS::Nova::KeyPair
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-keypair
+ params:
+ cluster_id: { get_param: cluster_id }
+ public_key: { get_param: ssh_public_key }
+
+ master-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-master-secgrp
+ params:
+ cluster_id: { get_param: cluster_id }
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift cluster master
+ params:
+ cluster_id: { get_param: cluster_id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: { get_param: ssh_incoming }
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 4001
+ port_range_max: 4001
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 8443
+ port_range_max: 8443
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 53
+ port_range_max: 53
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 24224
+ port_range_max: 24224
+ - direction: ingress
+ protocol: udp
+ port_range_min: 24224
+ port_range_max: 24224
+
+ node-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-node-secgrp
+ params:
+ cluster_id: { get_param: cluster_id }
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift cluster nodes
+ params:
+ cluster_id: { get_param: cluster_id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: { get_param: ssh_incoming }
+ - direction: ingress
+ protocol: udp
+ port_range_min: 4789
+ port_range_max: 4789
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10250
+ port_range_max: 10250
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: master-secgrp }
+
+ infra-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-infra-secgrp
+ params:
+ cluster_id: { get_param: cluster_id }
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift infrastructure cluster nodes
+ params:
+ cluster_id: { get_param: cluster_id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 80
+ port_range_max: 80
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 443
+ port_range_max: 443
+
+ masters:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: { get_param: num_masters }
+ resource_def:
+ type: heat_stack_server.yaml
+ properties:
+ name:
+ str_replace:
+ template: cluster_id-k8s_type-%index%
+ params:
+ cluster_id: { get_param: cluster_id }
+ k8s_type: master
+ cluster_id: { get_param: cluster_id }
+ type: master
+ image: { get_param: master_image }
+ flavor: { get_param: master_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: master-secgrp }
+ floating_network: { get_param: floating_ip_pool }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+ depends_on: interface
+
+ compute_nodes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: { get_param: num_nodes }
+ resource_def:
+ type: heat_stack_server.yaml
+ properties:
+ name:
+ str_replace:
+ template: cluster_id-k8s_type-sub_host_type-%index%
+ params:
+ cluster_id: { get_param: cluster_id }
+ k8s_type: node
+ sub_host_type: compute
+ cluster_id: { get_param: cluster_id }
+ type: node
+ subtype: compute
+ image: { get_param: node_image }
+ flavor: { get_param: node_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: node-secgrp }
+ floating_network: { get_param: floating_ip_pool }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+ depends_on: interface
+
+ infra_nodes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: { get_param: num_infra }
+ resource_def:
+ type: heat_stack_server.yaml
+ properties:
+ name:
+ str_replace:
+ template: cluster_id-k8s_type-sub_host_type-%index%
+ params:
+ cluster_id: { get_param: cluster_id }
+ k8s_type: node
+ sub_host_type: infra
+ cluster_id: { get_param: cluster_id }
+ type: node
+ subtype: infra
+ image: { get_param: infra_image }
+ flavor: { get_param: infra_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: node-secgrp }
+ - { get_resource: infra-secgrp }
+ floating_network: { get_param: floating_ip_pool }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+ depends_on: interface
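
Each OS::Heat::ResourceGroup above stamps out count copies of the nested heat_stack_server.yaml template, substituting the member's position for %index%, and get_attr against a group fans out over all members. An abridged fragment showing how the names come out (cluster id and count are illustrative):

    resources:
      masters:
        type: OS::Heat::ResourceGroup
        properties:
          count: 2
          resource_def:
            type: heat_stack_server.yaml
            properties:
              name:
                str_replace:
                  # Expands to mycluster-master-0 and mycluster-master-1;
                  # { get_attr: [ masters, name ] } then returns both as a list.
                  template: cluster_id-k8s_type-%index%
                  params:
                    cluster_id: mycluster
                    k8s_type: master
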
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yml b/playbooks/openstack/openshift-cluster/files/heat_stack.yml
deleted file mode 100644
index c5f95d87d..000000000
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yml
+++ /dev/null
@@ -1,149 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: OpenShift cluster
-
-parameters:
- cluster-id:
- type: string
- label: Cluster ID
- description: Identifier of the cluster
-
- network-prefix:
- type: string
- label: Network prefix
- description: Prefix of the network objects
-
- cidr:
- type: string
- label: CIDR
- description: CIDR of the network of the cluster
-
- dns-nameservers:
- type: comma_delimited_list
- label: DNS nameservers list
- description: List of DNS nameservers
-
- external-net:
- type: string
- label: External network
- description: Name of the external network
- default: external
-
- ssh-incoming:
- type: string
- label: Source of ssh connections
- description: Source of legitimate ssh connections
-
-resources:
- net:
- type: OS::Neutron::Net
- properties:
- name:
- str_replace:
- template: network-prefix-net
- params:
- network-prefix: { get_param: network-prefix }
-
- subnet:
- type: OS::Neutron::Subnet
- properties:
- name:
- str_replace:
- template: network-prefix-subnet
- params:
- network-prefix: { get_param: network-prefix }
- network: { get_resource: net }
- cidr: { get_param: cidr }
- dns_nameservers: { get_param: dns-nameservers }
-
- router:
- type: OS::Neutron::Router
- properties:
- name:
- str_replace:
- template: network-prefix-router
- params:
- network-prefix: { get_param: network-prefix }
- external_gateway_info:
- network: { get_param: external-net }
-
- interface:
- type: OS::Neutron::RouterInterface
- properties:
- router_id: { get_resource: router }
- subnet_id: { get_resource: subnet }
-
- node-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: network-prefix-node-secgrp
- params:
- network-prefix: { get_param: network-prefix }
- description:
- str_replace:
- template: Security group for cluster-id OpenShift cluster nodes
- params:
- cluster-id: { get_param: cluster-id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh-incoming }
- - direction: ingress
- protocol: udp
- port_range_min: 4789
- port_range_max: 4789
- remote_mode: remote_group_id
- - direction: ingress
- protocol: tcp
- port_range_min: 10250
- port_range_max: 10250
- remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
-
- master-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: network-prefix-master-secgrp
- params:
- network-prefix: { get_param: network-prefix }
- description:
- str_replace:
- template: Security group for cluster-id OpenShift cluster master
- params:
- cluster-id: { get_param: cluster-id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh-incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 4001
- port_range_max: 4001
- - direction: ingress
- protocol: tcp
- port_range_min: 8443
- port_range_max: 8443
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- - direction: ingress
- protocol: tcp
- port_range_min: 24224
- port_range_max: 24224
- - direction: ingress
- protocol: udp
- port_range_min: 24224
- port_range_max: 24224
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
new file mode 100644
index 000000000..9dcab3e60
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
@@ -0,0 +1,130 @@
+heat_template_version: 2014-10-16
+
+description: OpenShift cluster server
+
+parameters:
+
+ name:
+ type: string
+ label: Name
+ description: Name
+
+ cluster_id:
+ type: string
+ label: Cluster ID
+ description: Identifier of the cluster
+
+ type:
+ type: string
+ label: Type
+    description: Type of the server, master or node
+
+ subtype:
+ type: string
+ label: Sub-type
+    description: Sub-type for nodes, compute or infra; default otherwise
+ default: default
+
+ key_name:
+ type: string
+ label: Key name
+ description: Key name of keypair
+
+ image:
+ type: string
+ label: Image
+ description: Name of the image
+
+ flavor:
+ type: string
+ label: Flavor
+ description: Name of the flavor
+
+ net:
+ type: string
+ label: Net ID
+ description: Net resource
+
+ net_name:
+ type: string
+ label: Net name
+ description: Net name
+
+ subnet:
+ type: string
+ label: Subnet ID
+ description: Subnet resource
+
+ secgrp:
+ type: comma_delimited_list
+ label: Security groups
+ description: Security group resources
+
+ floating_network:
+ type: string
+ label: Floating network
+ description: Network to allocate floating IP from
+
+outputs:
+
+ name:
+ description: Name of the server
+ value: { get_attr: [ server, name ] }
+
+ private_ip:
+ description: Private IP of the server
+ value:
+ get_attr:
+ - server
+ - addresses
+ - { get_param: net_name }
+ - 0
+ - addr
+
+ floating_ip:
+ description: Floating IP of the server
+ value:
+ get_attr:
+ - server
+ - addresses
+ - { get_param: net_name }
+ - 1
+ - addr
+
+resources:
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: name }
+ key_name: { get_param: key_name }
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ networks:
+ - port: { get_resource: port }
+ user_data: { get_file: user-data }
+ user_data_format: RAW
+ metadata:
+ env: { get_param: cluster_id }
+ host-type: { get_param: type }
+ env-host-type:
+ str_replace:
+ template: cluster_id-openshift-type
+ params:
+ cluster_id: { get_param: cluster_id }
+ type: { get_param: type }
+ sub-host-type: { get_param: subtype }
+
+ port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: net }
+ fixed_ips:
+ - subnet: { get_param: subnet }
+ security_groups: { get_param: secgrp }
+
+ floating-ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: floating_network }
+ port_id: { get_resource: port }
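
One subtlety in this nested template: the private_ip and floating_ip outputs index into the server's address list for the named network, on the assumption that Nova reports the fixed address at position 0 and the associated floating IP at position 1. In flow form:

    private_ip:
      value: { get_attr: [ server, addresses, { get_param: net_name }, 0, addr ] }   # fixed address
    floating_ip:
      value: { get_attr: [ server, addresses, { get_param: net_name }, 1, addr ] }   # floating IP
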
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 5c86ade3f..651aef40b 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -8,23 +8,114 @@
tasks:
- fail:
msg: "Deployment type not supported for OpenStack provider yet"
- when: deployment_type in ['online', 'enterprise']
-
- - include: tasks/configure_openstack.yml
-
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
+ when: deployment_type == 'online'
+
+ # TODO: Write an Ansible module for dealing with HEAT stacks
+ # Dealing with the outputs is currently terrible
+
+ - name: Check OpenStack stack
+ command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
+ register: stack_show_result
+ changed_when: false
+ failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
+
+ - set_fact:
+ heat_stack_action: 'stack-create'
+ when: stack_show_result.rc == 1
+ - set_fact:
+ heat_stack_action: 'stack-update'
+ when: stack_show_result.rc == 0
+
+ - name: Create or Update OpenStack Stack
+ command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
+ -P cluster_id={{ cluster_id }}
+ -P cidr={{ openstack_network_cidr }}
+ -P dns_nameservers={{ openstack_network_dns | join(",") }}
+ -P external_net={{ openstack_network_external_net }}
+ -P floating_ip_pool={{ openstack_floating_ip_pool }}
+ -P ssh_public_key="{{ openstack_ssh_public_key }}"
+ -P ssh_incoming={{ openstack_ssh_access_from }}
+ -P num_masters={{ num_masters }}
+ -P num_nodes={{ num_nodes }}
+ -P num_infra={{ num_infra }}
+ -P master_image={{ deployment_vars[deployment_type].image }}
+ -P node_image={{ deployment_vars[deployment_type].image }}
+ -P infra_image={{ deployment_vars[deployment_type].image }}
+ -P master_flavor={{ openstack_flavor["master"] }}
+ -P node_flavor={{ openstack_flavor["node"] }}
+ -P infra_flavor={{ openstack_flavor["infra"] }}
+ openshift-ansible-{{ cluster_id }}-stack'
+
+ - name: Wait for OpenStack Stack readiness
+ shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
+ register: stack_show_status_result
+ until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
+ retries: 30
+ delay: 1
+ failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
+
+ - name: Read OpenStack Stack outputs
+ command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
+ register: stack_show_result
+
+ - set_fact:
+ parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}"
+
+ - name: Add new master instances groups and variables
+ add_host:
+ hostname: '{{ item[0] }}'
+ ansible_ssh_host: '{{ item[2] }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_env-host-type_{{ cluster_id }}-openshift-master, tag_sub-host-type_default'
+ with_together:
+ - parsed_outputs.master_names
+ - parsed_outputs.master_ips
+ - parsed_outputs.master_floating_ips
+
+ - name: Add new node instances groups and variables
+ add_host:
+ hostname: '{{ item[0] }}'
+ ansible_ssh_host: '{{ item[2] }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_compute'
+ with_together:
+ - parsed_outputs.node_names
+ - parsed_outputs.node_ips
+ - parsed_outputs.node_floating_ips
+
+ - name: Add new infra instances groups and variables
+ add_host:
+ hostname: '{{ item[0] }}'
+ ansible_ssh_host: '{{ item[2] }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_infra'
+ with_together:
+ - parsed_outputs.infra_names
+ - parsed_outputs.infra_ips
+ - parsed_outputs.infra_floating_ips
+
+ - name: Wait for ssh
+ wait_for:
+ host: '{{ item }}'
+ port: 22
+ with_flattened:
+ - parsed_outputs.master_floating_ips
+ - parsed_outputs.node_floating_ips
+ - parsed_outputs.infra_floating_ips
+
+ - name: Wait for user setup
+ command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
+ register: result
+ until: result.rc == 0
+ retries: 30
+ delay: 1
+ with_flattened:
+ - parsed_outputs.master_floating_ips
+ - parsed_outputs.node_floating_ips
+ - parsed_outputs.infra_floating_ips
- include: update.yml
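
The add_host tasks above depend on with_together zipping the parsed stack outputs positionally, so item[0], item[1], and item[2] are the name, private IP, and floating IP of the same server. A toy illustration with dummy values:

    ---
    - hosts: localhost
      gather_facts: no
      vars:
        names: ['master-0']
        ips: ['10.0.0.10']
        floating_ips: ['172.16.10.10']
      tasks:
        - debug:
            msg: "{{ item[0] }} private={{ item[1] }} floating={{ item[2] }}"
          with_together:
            - names
            - ips
            - floating_ips
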
diff --git a/playbooks/openstack/openshift-cluster/lookup_plugins b/playbooks/openstack/openshift-cluster/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml b/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml
deleted file mode 100644
index 2cbdb4805..000000000
--- a/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Check infra
- command: 'heat stack-show {{ openstack_network_prefix }}-stack'
- register: stack_show_result
- changed_when: false
- failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
-
-- name: Create infra
- command: 'heat stack-create -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ openstack_network_prefix }}-stack'
- when: stack_show_result.rc == 1
-
-- name: Update infra
- command: 'heat stack-update -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ openstack_network_prefix }}-stack'
- when: stack_show_result.rc == 0
-
-- name: Wait for infra readiness
- shell: 'heat stack-show {{ openstack_network_prefix }}-stack | awk ''$2 == "stack_status" {print $4}'''
- register: stack_show_status_result
- until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
- retries: 30
- delay: 1
- failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
-- name: Create ssh keypair
- nova_keypair:
- name: "{{ openstack_ssh_keypair }}"
- public_key: "{{ openstack_ssh_public_key }}"
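
The deleted configure_openstack.yml drove the Heat CLI through a check/create-or-update/poll cycle. The same idiom, condensed and with a hypothetical stack name (the original keyed the create on rc == 1 specifically), looks like:

- name: Check whether the stack exists
  command: heat stack-show example-stack
  register: show
  changed_when: false
  failed_when: show.rc != 0 and 'Stack not found' not in show.stderr

- name: Create the stack when absent
  command: heat stack-create -f heat_stack.yaml example-stack
  when: show.rc != 0

- name: Update the stack when present
  command: heat stack-update -f heat_stack.yaml example-stack
  when: show.rc == 0
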
diff --git a/playbooks/openstack/openshift-cluster/tasks/launch_instances.yml b/playbooks/openstack/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 1b9696aac..000000000
--- a/playbooks/openstack/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Get net id
- shell: 'neutron net-show {{ openstack_network_prefix }}-net | awk "/\\<id\\>/ {print \$4}"'
- register: net_id_result
-
-- name: Launch instance(s)
- nova_compute:
- name: '{{ item }}'
- image_name: '{{ deployment_vars[deployment_type].image.name | default(omit, true) }}'
- image_id: '{{ deployment_vars[deployment_type].image.id | default(omit, true) }}'
- flavor_ram: '{{ openstack_flavor[k8s_type].ram | default(omit, true) }}'
- flavor_id: '{{ openstack_flavor[k8s_type].id | default(omit, true) }}'
- flavor_include: '{{ openstack_flavor[k8s_type].include | default(omit, true) }}'
- key_name: '{{ openstack_ssh_keypair }}'
- security_groups: '{{ openstack_network_prefix }}-{{ k8s_type }}-secgrp'
- nics:
- - net-id: '{{ net_id_result.stdout }}'
- user_data: "{{ lookup('file','files/user-data') }}"
- meta:
- env: '{{ cluster }}'
- host-type: '{{ type }}'
- env-host-type: '{{ cluster }}-openshift-{{ type }}'
- floating_ip_pools: '{{ openstack_floating_ip_pools }}'
- with_items: instances
- register: nova_compute_result
-
-- name: Add new instances groups and variables
- add_host:
- hostname: '{{ item.item }}'
- ansible_ssh_host: '{{ item.public_ip }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env_{{ cluster }}, tag_host-type_{{ type }}, tag_env-host-type_{{ cluster }}-openshift-{{ type }}'
- with_items: nova_compute_result.results
-
-- name: Wait for ssh
- wait_for:
- host: '{{ item.public_ip }}'
- port: 22
- with_items: nova_compute_result.results
-
-- name: Wait for user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.item].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.item].ansible_ssh_user }} user is setup'
- register: result
- until: result.rc == 0
- retries: 30
- delay: 1
- with_items: nova_compute_result.results
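
The deleted launch_instances.yml leaned on the register-over-a-loop pattern: registering a looped task yields a results list whose entries carry both the original loop variable (item.item) and the module's return values (here public_ip), which the follow-on tasks then iterate over. A stripped-down sketch with hypothetical names:

- name: Launch one instance per name
  nova_compute:
    name: '{{ item }}'
    image_name: example-image    # hypothetical image
  with_items: instances
  register: launched

- name: Register each new host by its public IP
  add_host:
    hostname: '{{ item.item }}'               # the original loop item
    ansible_ssh_host: '{{ item.public_ip }}'  # from the module's return values
  with_items: launched.results
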
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index 2f05f0992..62df2be73 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -13,7 +13,16 @@
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[cluster_group] | default([])
-- hosts: oo_hosts_to_terminate
+- name: Unsubscribe VMs
+ hosts: oo_hosts_to_terminate
+ vars_files:
+ - vars.yml
+ roles:
+ - role: rhel_unsubscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
- hosts: localhost
connection: local
@@ -21,23 +30,18 @@
vars_files:
- vars.yml
tasks:
- - name: Retrieve the floating IPs
- shell: "neutron floatingip-list | awk '/{{ hostvars[item].ansible_default_ipv4.address }}/ {print $2}'"
- with_items: groups['oo_hosts_to_terminate'] | default([])
- register: floating_ips_to_delete
-
- - name: Terminate instance(s)
- nova_compute:
- name: "{{ hostvars[item].os_name }}"
- state: absent
- with_items: groups['oo_hosts_to_terminate'] | default([])
-
- - name: Delete floating IPs
- command: "neutron floatingip-delete {{ item.stdout }}"
- with_items: floating_ips_to_delete.results | default([])
-
- - name: Destroy the network
- command: "heat stack-delete {{ openstack_network_prefix }}-stack"
+ - name: Delete the OpenStack Stack
+ command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
register: stack_delete_result
changed_when: stack_delete_result.rc == 0
failed_when: stack_delete_result.rc != 0 and 'could not be found' not in stack_delete_result.stdout
+
+ - name: Wait for the completion of the OpenStack Stack deletion
+ shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
+ when: stack_delete_result.changed
+ register: stack_show_result
+ until: stack_show_result.stdout != 'DELETE_IN_PROGRESS'
+ retries: 60
+ delay: 1
+ failed_when: '"Stack not found" not in stack_show_result.stderr and
+ stack_show_result.stdout != "DELETE_COMPLETE"'
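
Two idioms are at work in the replacement tasks: changed_when/failed_when make the delete idempotent (a stack that is already gone counts as an unchanged success), and the poll runs only when the delete reported a change, succeeding on DELETE_COMPLETE or on the stack vanishing mid-poll. The rhel_unsubscribe condition above also shows layered defaults: the oo_option lookup wins if set, then an inventory rhsub_skip, then the literal 'no', with the True flag letting empty values fall through. A condensed sketch of that chain, with hypothetical option and variable names:

# Preference order: option lookup, then inventory variable, then literal fallback.
skip_it: "{{ lookup('oo_option', 'example_skip') |
             default(example_skip_var, True) |
             default('no', True) | lower }}"
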
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 5e7ab4e58..e006aa74a 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -11,7 +11,9 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
+ with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
+ | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
+ | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
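
Applying default([]) to each group before the union is what makes the new expression safe: a tag group with no members simply does not exist in the inventory, and indexing a missing key would abort the play, whereas the union of per-group defaults just yields whichever hosts are present. In isolation, with hypothetical tag names:

# Each group falls back to [] on its own, so a missing group cannot abort the union.
with_items: (groups['tag_example-masters'] | default([])) | union(groups['tag_example-nodes'] | default([]))
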
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index c754f19fc..262d3f4ed 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -1,39 +1,33 @@
---
-openstack_infra_heat_stack: "{{ opt_infra_heat_stack | default('files/heat_stack.yml') }}"
-openstack_network_prefix: "{{ opt_network_prefix | default('openshift-ansible-'+cluster_id) }}"
-openstack_network_cidr: "{{ opt_net_cidr | default('192.168.' + ( ( 1048576 | random % 256 ) | string() ) + '.0/24') }}"
-openstack_network_external_net: "{{ opt_external_net | default('external') }}"
-openstack_floating_ip_pools: "{{ opt_floating_ip_pools | default('external') | oo_split() }}"
-openstack_network_dns: "{{ opt_dns | default('8.8.8.8,8.8.4.4') | oo_split() }}"
-openstack_ssh_keypair: "{{ opt_keypair | default(lookup('env', 'LOGNAME')+'_key') }}"
-openstack_ssh_public_key: "{{ lookup('file', opt_public_key | default('~/.ssh/id_rsa.pub')) }}"
-openstack_ssh_access_from: "{{ opt_ssh_from | default('0.0.0.0/0') }}"
+openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) |
+ default('files/heat_stack.yaml', True) }}"
+openstack_network_cidr: "{{ lookup('oo_option', 'net_cidr' ) |
+ default('192.168.' + ( ( 1048576 | random % 256 ) | string() ) + '.0/24', True) }}"
+openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) |
+ default('external', True) }}"
+openstack_floating_ip_pool: "{{ lookup('oo_option', 'floating_ip_pool' ) |
+ default('external', True) }}"
+openstack_network_dns: "{{ lookup('oo_option', 'dns' ) |
+ default('8.8.8.8,8.8.4.4', True) | oo_split() }}"
+openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') |
+ default('~/.ssh/id_rsa.pub', True)) }}"
+openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
+ default('0.0.0.0/0', True) }}"
openstack_flavor:
- master:
- ram: "{{ opt_master_flavor_ram | default(2048) }}"
- id: "{{ opt_master_flavor_id | default() }}"
- include: "{{ opt_master_flavor_include | default() }}"
- node:
- ram: "{{ opt_node_flavor_ram | default(4096) }}"
- id: "{{ opt_node_flavor_id | default() }}"
- include: "{{ opt_node_flavor_include | default() }}"
+ master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
+ infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
+ node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"
deployment_vars:
origin:
- image:
- name: "{{ opt_image_name | default('centos-70-raw') }}"
- id:
+ image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
ssh_user: openshift
sudo: yes
online:
image:
- name:
- id:
ssh_user: root
sudo: no
enterprise:
- image:
- name: "{{ opt_image_name | default('centos-70-raw') }}"
- id:
+ image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.1-20150224.0.x86_64', True) }}"
ssh_user: openshift
sudo: yes
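
Throughout the new vars.yml, the second argument to default matters: default('x', True) returns the fallback not only when the input is undefined but also when it evaluates to an empty string, which is what the oo_option lookup yields for an unset option. A one-line sketch with a hypothetical option name:

# With the True flag, an oo_option that resolves to '' still falls back to m1.small.
openstack_example_flavor: "{{ lookup('oo_option', 'example_flavor') | default('m1.small', True) }}"
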