author    Wesley Hearn <wesley.s.hearn@gmail.com> 2015-04-24 14:06:12 -0400
committer Wesley Hearn <wesley.s.hearn@gmail.com> 2015-04-24 14:06:12 -0400
commit    519e097df31e2148ac520ab273d0bd2fb2f7bb43 (patch)
tree      4c5413c72a2dd2ec732730b6994a104cca6a9798
parent    db9cf8ef4f030f30391e021f360fe0c3db1dce74 (diff)
parent    8ce5e1de898d2fd2c4aa4620f31b57b62ed0c5d6 (diff)
Merge pull request #188 from openshift/master
Merge master into stage
-rw-r--r-- BUILD.md | 44
-rw-r--r-- README.md | 6
-rw-r--r-- README_AWS.md | 67
-rw-r--r-- README_GCE.md | 27
-rw-r--r-- README_OSE.md | 241
-rw-r--r-- README_libvirt.md | 130
-rw-r--r-- ansible.cfg | 23
-rwxr-xr-x bin/cluster | 241
-rwxr-xr-x bin/ohi | 110
-rw-r--r-- bin/openshift-ansible-bin.spec | 65
-rw-r--r-- bin/openshift_ansible.conf.example | 6
-rw-r--r-- bin/openshift_ansible/__init__.py | 0
-rw-r--r-- bin/openshift_ansible/awsutil.py (renamed from bin/awsutil.py) | 70
-rwxr-xr-x bin/opssh | 79
-rwxr-xr-x bin/oscp | 28
-rwxr-xr-x bin/ossh | 26
-rwxr-xr-x bin/ossh_bash_completion | 23
-rwxr-xr-x cluster.sh | 113
-rw-r--r-- filter_plugins/oo_filters.py | 217
-rw-r--r-- git/.pylintrc | 390
-rwxr-xr-x git/parent.rb | 45
-rwxr-xr-x git/pylint.sh | 14
-rwxr-xr-x git/yaml_validation.rb | 72
-rw-r--r-- inventory/aws/hosts/ec2.ini (renamed from inventory/aws/ec2.ini) | 0
-rwxr-xr-x inventory/aws/hosts/ec2.py (renamed from inventory/aws/ec2.py) | 0
-rw-r--r-- inventory/aws/hosts/hosts | 1
-rw-r--r-- inventory/byo/hosts | 34
-rwxr-xr-x inventory/gce/hosts/gce.py (renamed from inventory/gce/gce.py) | 0
-rw-r--r-- inventory/gce/hosts/hosts | 1
-rw-r--r-- inventory/libvirt/hosts/hosts | 1
-rw-r--r-- inventory/libvirt/hosts/libvirt.ini | 20
-rwxr-xr-x inventory/libvirt/hosts/libvirt_generic.py | 179
-rwxr-xr-x inventory/multi_ec2.py | 110
-rw-r--r-- inventory/multi_ec2.yaml.example | 4
-rw-r--r-- inventory/openshift-ansible-inventory.spec | 50
l--------- playbooks/adhoc/noc/filter_plugins | 1
-rw-r--r-- playbooks/adhoc/noc/get_zabbix_problems.yml | 41
l--------- playbooks/adhoc/noc/roles | 1
-rw-r--r-- playbooks/aws/ansible-tower/launch.yml | 2
-rw-r--r-- playbooks/aws/openshift-cluster/config.yml | 36
l--------- playbooks/aws/openshift-cluster/filter_plugins | 1
-rw-r--r-- playbooks/aws/openshift-cluster/launch.yml | 30
-rw-r--r-- playbooks/aws/openshift-cluster/library/ec2_ami_find.py | 302
-rw-r--r-- playbooks/aws/openshift-cluster/list.yml | 24
l--------- playbooks/aws/openshift-cluster/roles | 1
-rw-r--r-- playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 132
-rw-r--r-- playbooks/aws/openshift-cluster/templates/user_data.j2 | 29
-rw-r--r-- playbooks/aws/openshift-cluster/terminate.yml | 16
-rw-r--r-- playbooks/aws/openshift-cluster/update.yml | 18
-rw-r--r-- playbooks/aws/openshift-cluster/vars.defaults.yml | 1
-rw-r--r-- playbooks/aws/openshift-cluster/vars.online.int.yml | 9
-rw-r--r-- playbooks/aws/openshift-cluster/vars.online.prod.yml | 9
-rw-r--r-- playbooks/aws/openshift-cluster/vars.online.stage.yml | 9
-rw-r--r-- playbooks/aws/openshift-cluster/vars.yml | 38
-rw-r--r-- playbooks/aws/openshift-master/config.yml | 51
-rw-r--r-- playbooks/aws/openshift-master/launch.yml | 19
-rw-r--r-- playbooks/aws/openshift-master/terminate.yml | 2
-rw-r--r-- playbooks/aws/openshift-master/vars.yml | 2
-rw-r--r-- playbooks/aws/openshift-node/config.yml | 62
-rw-r--r-- playbooks/aws/openshift-node/launch.yml | 25
-rw-r--r-- playbooks/aws/openshift-node/terminate.yml | 2
-rw-r--r-- playbooks/aws/openshift-node/vars.yml | 2
-rw-r--r-- playbooks/aws/terminate.yml | 64
-rw-r--r-- playbooks/byo/config.yml | 6
l--------- playbooks/byo/filter_plugins | 1
-rw-r--r-- playbooks/byo/openshift-master/config.yml | 15
l--------- playbooks/byo/openshift-master/filter_plugins | 1
l--------- playbooks/byo/openshift-master/roles | 1
-rw-r--r-- playbooks/byo/openshift-node/config.yml | 21
l--------- playbooks/byo/openshift-node/filter_plugins | 1
l--------- playbooks/byo/openshift-node/roles | 1
-rw-r--r-- playbooks/byo/openshift_facts.yml | 10
l--------- playbooks/byo/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/config.yml | 4
l--------- playbooks/common/openshift-cluster/filter_plugins | 1
l--------- playbooks/common/openshift-cluster/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml | 11
-rw-r--r-- playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml | 11
-rw-r--r-- playbooks/common/openshift-cluster/update_repos_and_packages.yml | 7
-rw-r--r-- playbooks/common/openshift-master/config.yml | 19
l--------- playbooks/common/openshift-master/filter_plugins | 1
l--------- playbooks/common/openshift-master/roles | 1
-rw-r--r-- playbooks/common/openshift-node/config.yml | 127
l--------- playbooks/common/openshift-node/filter_plugins | 1
l--------- playbooks/common/openshift-node/roles | 1
-rw-r--r-- playbooks/gce/openshift-cluster/config.yml | 37
l--------- playbooks/gce/openshift-cluster/filter_plugins | 1
-rw-r--r-- playbooks/gce/openshift-cluster/launch.yml | 28
-rw-r--r-- playbooks/gce/openshift-cluster/list.yml | 24
l--------- playbooks/gce/openshift-cluster/roles | 1
-rw-r--r-- playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 41
-rw-r--r-- playbooks/gce/openshift-cluster/terminate.yml | 34
-rw-r--r-- playbooks/gce/openshift-cluster/update.yml | 18
-rw-r--r-- playbooks/gce/openshift-cluster/vars.yml | 15
-rw-r--r-- playbooks/gce/openshift-master/config.yml | 50
-rw-r--r-- playbooks/gce/openshift-master/launch.yml | 20
-rw-r--r-- playbooks/gce/openshift-master/terminate.yml | 28
-rw-r--r-- playbooks/gce/openshift-master/vars.yml | 2
-rw-r--r-- playbooks/gce/openshift-node/config.yml | 61
-rw-r--r-- playbooks/gce/openshift-node/launch.yml | 30
-rw-r--r-- playbooks/gce/openshift-node/terminate.yml | 28
-rw-r--r-- playbooks/gce/openshift-node/vars.yml | 2
-rw-r--r-- playbooks/libvirt/openshift-cluster/config.yml | 38
l--------- playbooks/libvirt/openshift-cluster/filter_plugins | 1
-rw-r--r-- playbooks/libvirt/openshift-cluster/launch.yml | 36
-rw-r--r-- playbooks/libvirt/openshift-cluster/list.yml | 23
l--------- playbooks/libvirt/openshift-cluster/roles | 1
-rw-r--r-- playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml | 6
-rw-r--r-- playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 27
-rw-r--r-- playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 23
-rw-r--r-- playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 107
-rw-r--r-- playbooks/libvirt/openshift-cluster/templates/domain.xml | 68
-rw-r--r-- playbooks/libvirt/openshift-cluster/templates/meta-data | 3
-rw-r--r-- playbooks/libvirt/openshift-cluster/templates/network.xml | 23
-rw-r--r-- playbooks/libvirt/openshift-cluster/templates/user-data | 23
-rw-r--r-- playbooks/libvirt/openshift-cluster/terminate.yml | 44
-rw-r--r-- playbooks/libvirt/openshift-cluster/update.yml | 18
-rw-r--r-- playbooks/libvirt/openshift-cluster/vars.yml | 33
-rw-r--r-- rel-eng/packages/.readme | 3
-rw-r--r-- rel-eng/packages/openshift-ansible-bin | 1
-rw-r--r-- rel-eng/packages/openshift-ansible-inventory | 1
-rw-r--r-- rel-eng/tito.props | 5
-rw-r--r-- roles/ansible_tower/tasks/main.yaml | 7
-rw-r--r-- roles/docker/tasks/main.yml | 2
-rw-r--r-- roles/openshift_ansible_inventory/README.md | 41
-rw-r--r-- roles/openshift_ansible_inventory/defaults/main.yml | 4
-rw-r--r-- roles/openshift_ansible_inventory/handlers/main.yml | 2
-rw-r--r-- roles/openshift_ansible_inventory/meta/main.yml | 8
-rw-r--r-- roles/openshift_ansible_inventory/tasks/main.yml | 11
-rw-r--r-- roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 | 11
-rw-r--r-- roles/openshift_ansible_inventory/vars/main.yml | 2
-rw-r--r-- roles/openshift_common/README.md | 20
-rw-r--r-- roles/openshift_common/defaults/main.yml | 7
-rw-r--r-- roles/openshift_common/meta/main.yml | 2
-rw-r--r-- roles/openshift_common/tasks/main.yml | 33
-rw-r--r-- roles/openshift_common/tasks/set_facts.yml | 9
-rw-r--r-- roles/openshift_common/vars/main.yml | 9
-rw-r--r-- roles/openshift_facts/README.md | 34
-rwxr-xr-x roles/openshift_facts/library/openshift_facts.py | 478
-rw-r--r-- roles/openshift_facts/meta/main.yml | 15
-rw-r--r-- roles/openshift_facts/tasks/main.yml | 3
-rw-r--r-- roles/openshift_master/README.md | 29
-rw-r--r-- roles/openshift_master/defaults/main.yml | 13
-rw-r--r-- roles/openshift_master/handlers/main.yml | 1
-rw-r--r-- roles/openshift_master/tasks/main.yml | 151
-rw-r--r-- roles/openshift_master/vars/main.yml | 5
-rw-r--r-- roles/openshift_node/README.md | 7
-rw-r--r-- roles/openshift_node/defaults/main.yml | 6
-rw-r--r-- roles/openshift_node/handlers/main.yml | 2
-rw-r--r-- roles/openshift_node/library/openshift_register_node.py | 211
-rw-r--r-- roles/openshift_node/tasks/main.yml | 96
-rw-r--r-- roles/openshift_node/vars/main.yml | 2
-rw-r--r-- roles/openshift_register_nodes/README.md | 34
-rw-r--r-- roles/openshift_register_nodes/defaults/main.yml | 2
-rwxr-xr-x roles/openshift_register_nodes/library/kubernetes_register_node.py | 601
-rw-r--r-- roles/openshift_register_nodes/meta/main.yml | 17
-rw-r--r-- roles/openshift_register_nodes/tasks/main.yml | 56
-rw-r--r-- roles/openshift_register_nodes/vars/main.yml | 7
-rw-r--r-- roles/openshift_repos/README.md | 38
-rw-r--r-- roles/openshift_repos/defaults/main.yaml | 2
-rw-r--r-- roles/openshift_repos/files/online/repos/enterprise-v3.repo | 10
-rw-r--r-- roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo (renamed from roles/repos/files/online/rhel-7-libra-candidate.repo) | 0
-rw-r--r-- roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo | 7
-rw-r--r-- roles/openshift_repos/files/removed/repos/epel7-openshift.repo | 0
-rw-r--r-- roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo | 0
-rw-r--r-- roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo | 0
-rw-r--r-- roles/openshift_repos/meta/main.yml | 15
-rw-r--r-- roles/openshift_repos/tasks/main.yaml (renamed from roles/repos/tasks/main.yaml) | 23
-rw-r--r-- roles/openshift_repos/templates/yum_repo.j2 (renamed from roles/repos/templates/yum_repo.j2) | 1
-rw-r--r-- roles/openshift_repos/vars/main.yml (renamed from roles/repos/vars/main.yml) | 0
-rw-r--r-- roles/openshift_sdn_master/defaults/main.yml | 2
-rw-r--r-- roles/openshift_sdn_master/meta/main.yml | 3
-rw-r--r-- roles/openshift_sdn_master/tasks/main.yml | 27
-rw-r--r-- roles/openshift_sdn_node/README.md | 9
-rw-r--r-- roles/openshift_sdn_node/defaults/main.yml | 2
-rw-r--r-- roles/openshift_sdn_node/meta/main.yml | 3
-rw-r--r-- roles/openshift_sdn_node/tasks/main.yml | 51
-rw-r--r-- roles/os_env_extras_node/tasks/main.yml | 5
-rwxr-xr-x [-rw-r--r--] roles/os_firewall/library/os_firewall_manage_iptables.py | 145
-rw-r--r-- roles/os_firewall/meta/main.yml | 1
-rw-r--r-- roles/os_firewall/tasks/firewall/firewalld.yml | 6
-rw-r--r-- roles/os_firewall/tasks/firewall/iptables.yml | 13
-rw-r--r-- roles/os_update_latest/tasks/main.yml | 3
-rwxr-xr-x roles/os_zabbix/library/zbxapi.py | 273
-rw-r--r-- roles/repos/defaults/main.yaml | 5
-rw-r--r-- roles/repos/files/online/RPM-GPG-KEY-redhat-beta | 61
-rw-r--r-- roles/repos/files/online/RPM-GPG-KEY-redhat-release | 63
-rw-r--r-- roles/repos/files/online/epel7-kubernetes.repo | 6
-rw-r--r-- roles/repos/files/online/epel7-openshift.repo | 6
-rw-r--r-- roles/repos/files/online/oso-rhui-rhel-7-extras.repo | 23
-rw-r--r-- roles/repos/files/online/oso-rhui-rhel-7-server.repo | 21
-rw-r--r-- roles/yum_repos/README.md | 113
-rw-r--r-- roles/yum_repos/defaults/main.yml | 3
-rw-r--r-- roles/yum_repos/meta/main.yml | 8
-rw-r--r-- roles/yum_repos/tasks/main.yml | 47
-rw-r--r-- roles/yum_repos/templates/yumrepo.j2 | 18
196 files changed, 6230 insertions, 1234 deletions
diff --git a/BUILD.md b/BUILD.md
new file mode 100644
index 000000000..0016c96a5
--- /dev/null
+++ b/BUILD.md
@@ -0,0 +1,44 @@
+# openshift-ansible RPM Build instructions
+We use tito to make building and tracking revisions easy.
+
+For more information on tito, please see the [Tito home page](http://rm-rf.ca/tito "Tito home page").
+
+
+## Build openshift-ansible-bin
+- Change into openshift-ansible/bin
+```
+cd openshift-ansible/bin
+```
+- Build a test package (no tagging needed)
+```
+tito build --test --rpm
+```
+- Tag a new build (bumps version number and adds log entries)
+```
+tito tag
+```
+- Follow the on-screen tito instructions to push the tags
+- Build a new package based on the latest tag information
+```
+tito build --rpm
+```
+
+
+## Build openshift-ansible-inventory
+- Change into openshift-ansible/inventory
+```
+cd openshift-ansible/inventory
+```
+- Build a test package (no tagging needed)
+```
+tito build --test --rpm
+```
+- Tag a new build (bumps version number and adds log entries)
+```
+tito tag
+```
+- Follow the on-screen tito instructions to push the tags
+- Build a new package based on the latest tag information
+```
+tito build --rpm
+```
diff --git a/README.md b/README.md
index ffdfee6f2..87dbfc1ea 100644
--- a/README.md
+++ b/README.md
@@ -20,10 +20,14 @@ Setup
- Setup for a specific cloud:
- [AWS](README_AWS.md)
- [GCE](README_GCE.md)
+ - [local VMs](README_libvirt.md)
+
+- Build
+ - [How to build the openshift-ansible rpms](BUILD.md)
- Directory Structure:
- [cloud.rb](cloud.rb) - light wrapper around Ansible
- - [cluster.sh](cluster.sh) - easily create OpenShift 3 clusters
+ - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters
- [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible
- [inventory/](inventory) - houses Ansible dynamic inventory scripts
- [lib/](lib) - library components of cloud.rb
diff --git a/README_AWS.md b/README_AWS.md
index fb9d0f895..888abe939 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -14,7 +14,7 @@ Create a credentials file
export AWS_ACCESS_KEY_ID='AKIASTUFF'
export AWS_SECRET_ACCESS_KEY='STUFF'
```
-1. source this file
+2. source this file
```
source ~/.aws_creds
```
@@ -23,7 +23,7 @@ Note: You must source this file in each shell that you want to run cloud.rb
(Optional) Setup your $HOME/.ssh/config file
-------------------------------------------
-In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config'
+In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config'
to setup a private key file to allow ansible to connect to the created hosts.
To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to log in on AWS.
@@ -34,6 +34,43 @@ Host *.compute-1.amazonaws.com
Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
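+
+For example, to load your AWS key into the agent (the key path here is hypothetical):
+
+```
+ssh-add ~/.ssh/libra.pem
+```
+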
+(Optional) Choose where the cluster will be launched
+----------------------------------------------------
+
+By default, a cluster is launched with the following configuration:
+
+- Instance type: m3.large
+- AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments)
+- Region: us-east-1
+- Keypair name: libra
+- Security group: public
+
+Master specific defaults:
+- Master root volume size: 10 (in GiBs)
+- Master root volume type: gp2
+- Master root volume iops: 500 (only applicable when volume type is io1)
+
+Node specific defaults:
+- Node root volume size: 10 (in GiBs)
+- Node root volume type: gp2
+- Node root volume iops: 500 (only applicable when volume type is io1)
+- Docker volume size: 25 (in GiBs)
+- Docker volume ephemeral: true (Whether the docker volume is ephemeral)
+- Docker volume type: gp2 (only applicable if ephemeral is false)
+- Docker volume iops: 500 (only applicable when volume type is io1)
+
+If needed, these values can be changed by setting environment variables on your system.
+
+- export ec2_instance_type='m3.large'
+- export ec2_ami='ami-307b3658'
+- export ec2_region='us-east-1'
+- export ec2_keypair='libra'
+- export ec2_security_group='public'
+- export os_master_root_vol_size='20'
+- export os_master_root_vol_type='standard'
+- export os_node_root_vol_size='15'
+- export os_docker_vol_size='50'
+- export os_docker_vol_ephemeral='false'
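+
+For example, to persist a few overrides you could append them to the
+`~/.aws_creds` file created above and source it before running `bin/cluster`
+(a sketch; the instance type shown is illustrative):
+
+```
+# Illustrative overrides appended to ~/.aws_creds
+export ec2_instance_type='m3.xlarge'
+export os_master_root_vol_size='20'
+export os_docker_vol_size='50'
+export os_docker_vol_ephemeral='false'
+```
+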
Install Dependencies
--------------------
@@ -51,7 +88,29 @@ OSX:
Test The Setup
--------------
1. cd openshift-ansible
-1. Try to list all instances:
+1. Try to list all instances (passing an empty string as the cluster_id
+argument lists all ec2 instances)
+```
+ bin/cluster list aws ''
+```
+
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes
+```
+ bin/cluster create aws <cluster-id>
+```
+
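+By default `bin/cluster` creates one master and two nodes; the `-m/--masters`
+and `-n/--nodes` flags (see `bin/cluster` in this change) adjust those counts, e.g.:
+```
+ bin/cluster create aws <cluster-id> --nodes 4
+```
+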
+Updating a cluster
+---------------------
+1. To update the cluster
+```
+ bin/cluster update aws <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster
```
- ./cloud.rb aws list
+ bin/cluster terminate aws <cluster-id>
```
diff --git a/README_GCE.md b/README_GCE.md
index b00598113..f6c5138c1 100644
--- a/README_GCE.md
+++ b/README_GCE.md
@@ -4,7 +4,7 @@ GCE Setup Instructions
Get a gce service key
---------------------
-1. ask your GCE project administrator for a GCE service key
+1. Ask your GCE project administrator for a GCE service key
Note: If your GCE project does not show a Service Account under <Project>/APIs & auth/Credentials, you will need to use "Create new Client ID" to create a Service Account before your administrator can create the service key for you.
@@ -65,12 +65,29 @@ Install Dependencies
Test The Setup
--------------
1. cd openshift-ansible/
-2. Try to list all instances:
+1. Try to list all instances (passing an empty string as the cluster_id
+argument lists all gce instances)
```
- ./cloud.rb gce list
+ bin/cluster list gce ''
```
-3. Try to create an instance:
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes
```
- ./cloud.rb gce launch -n ${USER}-node1 -e int --type os3-node
+ bin/cluster create gce <cluster-id>
+```
+
+Updating a cluster
+---------------------
+1. To update the cluster
+```
+ bin/cluster update gce <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster
+```
+ bin/cluster terminate gce <cluster-id>
```
diff --git a/README_OSE.md b/README_OSE.md
new file mode 100644
index 000000000..cd0b9f7a6
--- /dev/null
+++ b/README_OSE.md
@@ -0,0 +1,241 @@
+# Installing OSEv3 from dev puddles using ansible
+
+* [Requirements](#requirements)
+* [Caveats](#caveats)
+* [Known Issues](#known-issues)
+* [Configuring the host inventory](#configuring-the-host-inventory)
+* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
+* [Running the ansible playbooks](#running-the-ansible-playbooks)
+* [Post-ansible steps](#post-ansible-steps)
+* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
+
+## Requirements
+* ansible
+ * Tested using ansible-1.8.4-1.fc20.noarch, but should work with version 1.8+
+  * There is currently a known issue with ansible-1.9.0; you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
+ * Available in Fedora channels
+ * Available for EL with EPEL and Optional channel
+* One or more RHEL 7.1 VMs
+* Either ssh key based auth for the root user or ssh key based auth for a user
+ with sudo access (no password)
+* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
+
+ ```sh
+ git clone https://github.com/openshift/openshift-ansible.git
+ cd openshift-ansible
+ ```
+
+## Caveats
+This ansible repo is currently under heavy revision for providing OSE support;
+the following items are highly likely to change before the OSE support is
+merged into the upstream repo:
+ * the current git branch for testing
+ * how the inventory file should be configured
+ * variables that need to be set
+ * bootstrapping steps
+ * other configuration steps
+
+## Known Issues
+* Host subscriptions are not configurable yet; the hosts need to be
+  pre-registered with subscription-manager or have the RHEL base repo
+  pre-configured. If using subscription-manager, the following commands will
+  disable all but the rhel-7-server, rhel-7-server-extras and
+  rhel-server-7-ose-beta repos:
+```sh
+subscription-manager repos --disable="*"
+subscription-manager repos \
+--enable="rhel-7-server-rpms" \
+--enable="rhel-7-server-extras-rpms" \
+--enable="rhel-server-7-ose-beta-rpms"
+```
+* Configuration of router is not automated yet
+* Configuration of docker-registry is not automated yet
+
+## Configuring the host inventory
+[Ansible docs](http://docs.ansible.com/intro_inventory.html)
+
+Example inventory file for configuring one master and two nodes for the test
+environment. This can be configured in the default inventory file
+(/etc/ansible/hosts), or using a custom file and passing the -i
+option to ansible-playbook.
+
+/etc/ansible/hosts:
+```ini
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# host group for masters
+[masters]
+ose3-master.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2].example.com
+```
+
+The hostnames above should resolve both from the hosts themselves and
+the host where ansible is running (if different).
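+
+For a quick test setup without DNS, entries like these in /etc/hosts on each
+machine would do (the addresses are illustrative):
+```
+192.168.1.10 ose3-master.example.com
+192.168.1.11 ose3-node1.example.com
+192.168.1.12 ose3-node2.example.com
+```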
+
+## Running the ansible playbooks
+From the openshift-ansible checkout run:
+```sh
+ansible-playbook playbooks/byo/config.yml
+```
+**Note:** this assumes that the host inventory is /etc/ansible/hosts; if using a different
+inventory file, use the -i option for ansible-playbook.
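+
+For instance, with a custom inventory file (the path here is hypothetical):
+```sh
+ansible-playbook -i ~/ose3-inventory playbooks/byo/config.yml
+```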
+
+## Post-ansible steps
+#### Create the default router
+On the master host:
+```sh
+systemctl restart openshift-sdn-master
+openshift ex router --create=true \
+ --credentials=/var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig \
+ --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}'
+```
+
+#### Create the default docker-registry
+On the master host:
+```sh
+openshift ex registry --create=true \
+ --credentials=/var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig \
+ --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \
+ --mount-host=/var/lib/openshift/docker-registry
+```
+
+## Overriding detected ip addresses and hostnames
+Some deployments will require that the user override the detected hostnames
+and ip addresses for the hosts. To see what the default values will be you can
+run the openshift_facts playbook:
+```sh
+ansible-playbook playbooks/byo/openshift_facts.yml
+```
+The output will be similar to:
+```
+ok: [10.3.9.45] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+ "ip": "172.16.4.79",
+ "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.45",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ... <snip> ...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+ok: [10.3.9.42] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+ "ip": "172.16.4.75",
+ "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.42",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ...<snip>...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+ok: [10.3.9.36] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+ "ip": "172.16.4.73",
+ "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.36",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ...<snip>...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+```
+Now we want to check the detected common settings to verify that they are
+what we expect (if not, we can override them).
+
+* hostname
+ * Should resolve to the internal ip from the instances themselves.
+ * openshift_hostname will override.
+* ip
+ * Should be the internal ip of the instance.
+ * openshift_ip will override.
+* public_hostname
+  * Should resolve to the external ip from hosts outside of the cloud provider.
+  * openshift_public_hostname will override.
+* public_ip
+  * Should be the externally accessible ip associated with the instance.
+  * openshift_public_ip will override.
+* use_openshift_sdn
+  * Should be true unless the cloud is GCE.
+  * openshift_use_openshift_sdn will override.
+
+To override the defaults, you can set the variables in your inventory:
+```
+...snip...
+[masters]
+ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com
+...snip...
+```
diff --git a/README_libvirt.md b/README_libvirt.md
new file mode 100644
index 000000000..bcbaf4bd5
--- /dev/null
+++ b/README_libvirt.md
@@ -0,0 +1,130 @@
+LIBVIRT Setup instructions
+==========================
+
+`libvirt` is an `openshift-ansible` provider that uses `libvirt` to create local Fedora VMs that are provisioned exactly the same way cloud VMs would be.
+
+This makes `libvirt` useful for developing, testing and debugging OpenShift and openshift-ansible locally on the developer’s workstation before going to the cloud.
+
+Install dependencies
+--------------------
+
+1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+2. Install [ebtables](http://ebtables.netfilter.org/)
+3. Install [qemu](http://wiki.qemu.org/Main_Page)
+4. Install [libvirt](http://libvirt.org/)
+5. Enable and start the libvirt daemon, e.g.:
+ - `systemctl enable libvirtd`
+ - `systemctl start libvirtd`
+6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+7. Check that your `$HOME` is accessible to the qemu user²
+8. Configure dns resolution on the host³
+
+#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
+
+You can test it with the following command:
+
+```
+virsh -c qemu:///system pool-list
+```
+
+If you get access-denied error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html.
+
+In short, if your libvirt has been compiled with Polkit support (e.g. Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` as follows to grant `$USER` full access to libvirt:
+
+```
+sudo /bin/sh -c "cat - > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules" << EOF
+polkit.addRule(function(action, subject) {
+        if (action.id == "org.libvirt.unix.manage" &&
+            subject.user == "$USER") {
+                polkit.log("action=" + action);
+                polkit.log("subject=" + subject);
+                return polkit.Result.YES;
+        }
+});
+EOF
+```
+
+If your libvirt has not been compiled with Polkit (e.g. Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket:
+
+```
+ls -l /var/run/libvirt/libvirt-sock
+srwxrwx--- 1 root libvirtd 0 févr. 12 16:03 /var/run/libvirt/libvirt-sock
+
+usermod -a -G libvirtd $USER
+# $USER needs to log out and back in for the new group to take effect
+```
+
+(Replace `$USER` with your login name)
+
+#### ² Qemu will run as a specific user. It must have access to the VMs’ drives
+
+All the disk drive resources needed by the VMs (Fedora disk image, cloud-init files) are put inside `~/libvirt-storage-pool-openshift/`.
+
+As we’re using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.
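+
+For reference, that `user:group` is controlled by the `user` and `group`
+settings in `/etc/libvirt/qemu.conf`; a sketch of the relevant settings (on
+Fedora the qemu user is typically `qemu`):
+
+```
+user = "qemu"
+group = "qemu"
+```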
+
+If your `$HOME` is world readable, everything is fine. If your `$HOME` is private, `ansible` will fail with an error message like:
+
+```
+error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
+```
+
+In order to fix that issue, you have several possibilities:
+* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
+  * backed by a filesystem with a lot of free disk space
+  * writable by your user
+  * accessible by the qemu user
+* Grant the qemu user access to the storage pool.
+
+On Arch:
+
+```
+setfacl -m g:kvm:--x ~
+```
+
+#### ³ Enabling DNS resolution to your guest VMs with NetworkManager
+
+- Verify NetworkManager is configured to use dnsmasq:
+
+```sh
+$ sudo vi /etc/NetworkManager/NetworkManager.conf
+[main]
+dns=dnsmasq
+```
+
+- Configure dnsmasq to use the Virtual Network router for example.com:
+
+```sh
+sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
+server=/example.com/192.168.55.1
+```
+
+Test The Setup
+--------------
+
+1. cd openshift-ansible/
+2. Try to list all instances (passing an empty string as the cluster_id argument lists all libvirt instances)
+
+```
+ bin/cluster list libvirt ''
+```
+
+Creating a cluster
+------------------
+
+1. To create a cluster with one master and two nodes
+
+```
+ bin/cluster create libvirt lenaic
+```
+
+Updating a cluster
+------------------
+
+1. To update the cluster
+
+```
+ bin/cluster update libvirt lenaic
+```
+
+Terminating a cluster
+---------------------
+
+1. To terminate the cluster
+
+```
+ bin/cluster terminate libvirt lenaic
+```
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 000000000..6a7722ad8
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,23 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+# Uncomment to use the provided BYO inventory
+#hostfile = inventory/byo/hosts
+
+# Uncomment to use the provided GCE dynamic inventory script
+#hostfile = inventory/gce/gce.py
+
+# Uncomment to use the provided AWS dynamic inventory script
+#hostfile = inventory/aws/ec2.py
diff --git a/bin/cluster b/bin/cluster
new file mode 100755
index 000000000..79f1f988f
--- /dev/null
+++ b/bin/cluster
@@ -0,0 +1,241 @@
+#!/usr/bin/env python2
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import ConfigParser
+import sys
+import os
+
+
+class Cluster(object):
+ """
+ Control and Configuration Interface for OpenShift Clusters
+ """
+ def __init__(self):
+ # setup ansible ssh environment
+ if 'ANSIBLE_SSH_ARGS' not in os.environ:
+ os.environ['ANSIBLE_SSH_ARGS'] = (
+ '-o ForwardAgent=yes '
+ '-o StrictHostKeyChecking=no '
+ '-o UserKnownHostsFile=/dev/null '
+ '-o ControlMaster=auto '
+ '-o ControlPersist=600s '
+ )
+
+ def get_deployment_type(self, args):
+ """
+ Get the deployment_type based on the environment variables and the
+ command line arguments
+ :param args: command line arguments provided by the user
+ :return: string representing the deployment type
+ """
+ deployment_type = 'origin'
+ if args.deployment_type:
+ deployment_type = args.deployment_type
+ elif 'OS_DEPLOYMENT_TYPE' in os.environ:
+ deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
+ return deployment_type
+
+ def create(self, args):
+ """
+ Create an OpenShift cluster for given provider
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
+ playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ env['num_masters'] = args.masters
+ env['num_nodes'] = args.nodes
+
+ return self.action(args, inventory, env, playbook)
+
+ def terminate(self, args):
+ """
+ Destroy OpenShift cluster
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
+ playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
+ def list(self, args):
+ """
+ List VMs in cluster
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
+ playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
+ def config(self, args):
+ """
+ Configure or reconfigure OpenShift across clustered VMs
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
+ playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
+ def update(self, args):
+ """
+ Update to latest OpenShift across clustered VMs
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
+ playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
+ def setup_provider(self, provider):
+ """
+ Setup ansible playbook environment
+ :param provider: command line arguments provided by user
+ :return: path to inventory for given provider
+ """
+ config = ConfigParser.ConfigParser()
+ if 'gce' == provider:
+ config.readfp(open('inventory/gce/hosts/gce.ini'))
+
+ for key in config.options('gce'):
+ os.environ[key] = config.get('gce', key)
+
+ inventory = '-i inventory/gce/hosts'
+ elif 'aws' == provider:
+ config.readfp(open('inventory/aws/hosts/ec2.ini'))
+
+ for key in config.options('ec2'):
+ os.environ[key] = config.get('ec2', key)
+
+ inventory = '-i inventory/aws/hosts'
+ elif 'libvirt' == provider:
+ inventory = '-i inventory/libvirt/hosts'
+ else:
+ # this code should never be reached
+ raise ValueError("invalid PROVIDER {}".format(provider))
+
+ return inventory
+
+ def action(self, args, inventory, env, playbook):
+ """
+ Build ansible-playbook command line and execute
+ :param args: command line arguments provided by user
+ :param inventory: derived provider library
+ :param env: environment variables for kubernetes
+ :param playbook: ansible playbook to execute
+ :return: exit status from ansible-playbook command
+ """
+
+ verbose = ''
+ if args.verbose > 0:
+ verbose = '-{}'.format('v' * args.verbose)
+
+ ansible_env = '-e \'{}\''.format(
+ ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
+ )
+
+ command = 'ansible-playbook {} {} {} {}'.format(
+ verbose, inventory, ansible_env, playbook
+ )
+
+ if args.verbose > 1:
+ command = 'time {}'.format(command)
+
+ if args.verbose > 0:
+ sys.stderr.write('RUN [{}]\n'.format(command))
+ sys.stderr.flush()
+
+ return os.system(command)
+
+
+if __name__ == '__main__':
+ """
+ Implemented to support writing unit tests
+ """
+
+ cluster = Cluster()
+
+ providers = ['gce', 'aws', 'libvirt']
+ parser = argparse.ArgumentParser(
+ description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
+ )
+ parser.add_argument('-v', '--verbose', action='count',
+ help='Multiple -v options increase the verbosity')
+ parser.add_argument('--version', action='version', version='%(prog)s 0.2')
+
+ meta_parser = argparse.ArgumentParser(add_help=False)
+ meta_parser.add_argument('provider', choices=providers, help='provider')
+ meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
+ meta_parser.add_argument('-t', '--deployment-type',
+ choices=['origin', 'online', 'enterprise'],
+ help='Deployment type. (default: origin)')
+
+ action_parser = parser.add_subparsers(dest='action', title='actions',
+ description='Choose from valid actions')
+
+ create_parser = action_parser.add_parser('create', help='Create a cluster',
+ parents=[meta_parser])
+ create_parser.add_argument('-m', '--masters', default=1, type=int,
+ help='number of masters to create in cluster')
+ create_parser.add_argument('-n', '--nodes', default=2, type=int,
+ help='number of nodes to create in cluster')
+ create_parser.set_defaults(func=cluster.create)
+
+ config_parser = action_parser.add_parser('config',
+ help='Configure or reconfigure a cluster',
+ parents=[meta_parser])
+ config_parser.set_defaults(func=cluster.config)
+
+ terminate_parser = action_parser.add_parser('terminate',
+ help='Destroy a cluster',
+ parents=[meta_parser])
+ terminate_parser.add_argument('-f', '--force', action='store_true',
+ help='Destroy cluster without confirmation')
+ terminate_parser.set_defaults(func=cluster.terminate)
+
+ update_parser = action_parser.add_parser('update',
+ help='Update OpenShift across cluster',
+ parents=[meta_parser])
+ update_parser.add_argument('-f', '--force', action='store_true',
+ help='Update cluster without confirmation')
+ update_parser.set_defaults(func=cluster.update)
+
+ list_parser = action_parser.add_parser('list', help='List VMs in cluster',
+ parents=[meta_parser])
+ list_parser.set_defaults(func=cluster.list)
+
+ args = parser.parse_args()
+
+ if 'terminate' == args.action and not args.force:
+ answer = raw_input("This will destroy the ENTIRE {} environment. Are you sure? [y/N] ".format(args.cluster_id))
+ if answer not in ['y', 'Y']:
+ sys.stderr.write('\nACTION [terminate] aborted by user!\n')
+ exit(1)
+
+ if 'update' == args.action and not args.force:
+ answer = raw_input("This is destructive and could corrupt {} environment. Continue? [y/N] ".format(args.cluster_id))
+ if answer not in ['y', 'Y']:
+ sys.stderr.write('\nACTION [update] aborted by user!\n')
+ exit(1)
+
+ status = args.func(args)
+ if status != 0:
+ sys.stderr.write("ACTION [{}] failed with exit status {}\n".format(args.action, status))
+ exit(status)
diff --git a/bin/ohi b/bin/ohi
new file mode 100755
index 000000000..408961ee4
--- /dev/null
+++ b/bin/ohi
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import traceback
+import sys
+import os
+import re
+import tempfile
+import time
+import subprocess
+import ConfigParser
+
+from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
+CONFIG_INVENTORY_OPTION = 'inventory'
+
+class Ohi(object):
+ def __init__(self):
+ self.inventory = None
+ self.host_type_aliases = {}
+ self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+ # Default the config path to /etc
+ self.config_path = os.path.join(os.path.sep, 'etc', \
+ 'openshift_ansible', \
+ 'openshift_ansible.conf')
+
+ self.parse_cli_args()
+ self.parse_config_file()
+
+ self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+
+ def run(self):
+ if self.args.list_host_types:
+ self.aws.print_host_types()
+ return 0
+
+ hosts = None
+ if self.args.host_type is not None and \
+ self.args.env is not None:
+ # Both env and host-type specified
+ hosts = self.aws.get_host_list(host_type=self.args.host_type, \
+ env=self.args.env)
+
+ if self.args.host_type is None and \
+ self.args.env is not None:
+ # Only env specified
+ hosts = self.aws.get_host_list(env=self.args.env)
+
+ if self.args.host_type is not None and \
+ self.args.env is None:
+ # Only host-type specified
+ hosts = self.aws.get_host_list(host_type=self.args.host_type)
+
+ if hosts is None:
+ # We weren't able to determine what they wanted to do
+ raise ArgumentError("Invalid combination of arguments")
+
+ for host in hosts:
+ print host
+ return 0
+
+ def parse_config_file(self):
+ if os.path.isfile(self.config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(self.config_path)
+
+ if config.has_section(CONFIG_MAIN_SECTION) and \
+ config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+ self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
+ self.host_type_aliases = {}
+ if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
+ self.host_type_aliases[alias] = value
+
+ def parse_cli_args(self):
+ """Setup the command line parser with the options we want
+ """
+
+ parser = argparse.ArgumentParser(description='Openshift Host Inventory')
+
+ parser.add_argument('--list-host-types', default=False, action='store_true',
+ help='List all of the host types')
+
+ parser.add_argument('-e', '--env', action="store",
+ help="Which environment to use")
+
+ parser.add_argument('-t', '--host-type', action="store",
+ help="Which host type to use")
+
+ self.args = parser.parse_args()
+
+
+if __name__ == '__main__':
+ if len(sys.argv) == 1:
+ print "\nError: No options given. Use --help to see the available options\n"
+        sys.exit(1)
+
+ try:
+ ohi = Ohi()
+ exitcode = ohi.run()
+ sys.exit(exitcode)
+ except ArgumentError as e:
+        print "\nError: %s\n" % e.message
+        sys.exit(1)
diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
new file mode 100644
index 000000000..c7db6f684
--- /dev/null
+++ b/bin/openshift-ansible-bin.spec
@@ -0,0 +1,65 @@
+Summary: OpenShift Ansible Scripts for working with metadata hosts
+Name: openshift-ansible-bin
+Version: 0.0.8
+Release: 1%{?dist}
+License: ASL 2.0
+URL: https://github.com/openshift/openshift-ansible
+Source0: %{name}-%{version}.tar.gz
+Requires: python2, openshift-ansible-inventory
+BuildRequires: python2-devel
+BuildArch: noarch
+
+%description
+Scripts to make it nicer when working with hosts that are defined only by metadata.
+
+%prep
+%setup -q
+
+%build
+
+%install
+mkdir -p %{buildroot}%{_bindir}
+mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
+mkdir -p %{buildroot}/etc/bash_completion.d
+mkdir -p %{buildroot}/etc/openshift_ansible
+
+cp -p ossh oscp opssh ohi %{buildroot}%{_bindir}
+cp -p openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
+
+cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
+
+%files
+%{_bindir}/*
+%{python_sitelib}/openshift_ansible/
+/etc/bash_completion.d/*
+%config(noreplace) /etc/openshift_ansible/
+
+%changelog
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1
+- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com)
+
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1
+- added the ability to run opssh and ohi on all hosts in an environment, as
+ well as all hosts of the same host-type regardless of environment
+ (twiest@redhat.com)
+- added ohi (twiest@redhat.com)
+* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
+- fixed bug where opssh would throw an exception if pssh returned a non-zero
+ exit code (twiest@redhat.com)
+
+* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
+- fixed the opssh default output behavior to be consistent with pssh. Also
+ fixed a bug in how directories are named for --outdir and --errdir.
+ (twiest@redhat.com)
+* Tue Mar 31 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
+- Fixed when tag was missing and added opssh completion (kwoodson@redhat.com)
+
+* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
+- created a python package named openshift_ansible (twiest@redhat.com)
+
+* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
+- added config file support to opssh, ossh, and oscp (twiest@redhat.com)
+* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
+- new package built with tito
+
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
new file mode 100644
index 000000000..e891b855a
--- /dev/null
+++ b/bin/openshift_ansible.conf.example
@@ -0,0 +1,6 @@
+#[main]
+#inventory = /usr/share/ansible/inventory/multi_ec2.py
+
+#[host_type_aliases]
+#host-type-one = aliasa,aliasb
+#host-type-two = aliasfortwo
diff --git a/bin/openshift_ansible/__init__.py b/bin/openshift_ansible/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/bin/openshift_ansible/__init__.py
diff --git a/bin/awsutil.py b/bin/openshift_ansible/awsutil.py
index 37259b946..65b269930 100644
--- a/bin/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -5,28 +5,36 @@ import os
import json
import re
+class ArgumentError(Exception):
+ def __init__(self, message):
+ self.message = message
+
class AwsUtil(object):
- def __init__(self):
- self.host_type_aliases = {
- 'legacy-openshift-broker': ['broker', 'ex-srv'],
- 'openshift-node': ['node', 'ex-node'],
- 'openshift-messagebus': ['msg'],
- 'openshift-customer-database': ['mongo'],
- 'openshift-website-proxy': ['proxy'],
- 'openshift-community-website': ['drupal'],
- 'package-mirror': ['mirror'],
- }
+    def __init__(self, inventory_path=None, host_type_aliases=None):
+        self.host_type_aliases = host_type_aliases or {}
+ self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+ if inventory_path is None:
+ inventory_path = os.path.realpath(os.path.join(self.file_path, \
+ '..', '..', 'inventory', \
+ 'multi_ec2.py'))
+
+ if not os.path.isfile(inventory_path):
+ raise Exception("Inventory file not found [%s]" % inventory_path)
+ self.inventory_path = inventory_path
+ self.setup_host_type_alias_lookup()
+
+ def setup_host_type_alias_lookup(self):
self.alias_lookup = {}
for key, values in self.host_type_aliases.iteritems():
for value in values:
self.alias_lookup[value] = key
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
- self.multi_ec2_path = os.path.realpath(os.path.join(self.file_path, '..','inventory','multi_ec2.py'))
+
def get_inventory(self,args=[]):
- cmd = [self.multi_ec2_path]
+ cmd = [self.inventory_path]
if args:
cmd.extend(args)
@@ -124,15 +132,45 @@ class AwsUtil(object):
return self.alias_lookup[host_type]
return host_type
+ def gen_env_tag(self, env):
+ """Generate the environment tag
+ """
+ return "tag_environment_%s" % env
+
+ def gen_host_type_tag(self, host_type):
+ """Generate the host type tag
+ """
+ host_type = self.resolve_host_type(host_type)
+ return "tag_host-type_%s" % host_type
+
def gen_env_host_type_tag(self, host_type, env):
"""Generate the environment host type tag
"""
host_type = self.resolve_host_type(host_type)
return "tag_env-host-type_%s-%s" % (env, host_type)
- def get_host_list(self, host_type, env):
+ def get_host_list(self, host_type=None, env=None):
"""Get the list of hosts from the inventory using host-type and environment
"""
inv = self.get_inventory()
- host_type_tag = self.gen_env_host_type_tag(host_type, env)
- return inv[host_type_tag]
+
+ if host_type is not None and \
+ env is not None:
+ # Both host type and environment were specified
+ env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
+ return inv[env_host_type_tag]
+
+ if host_type is None and \
+ env is not None:
+ # Just environment was specified
+ host_type_tag = self.gen_env_tag(env)
+ return inv[host_type_tag]
+
+ if host_type is not None and \
+ env is None:
+ # Just host-type was specified
+ host_type_tag = self.gen_host_type_tag(host_type)
+ return inv[host_type_tag]
+
+ # We should never reach here!
+ raise ArgumentError("Invalid combination of parameters")
diff --git a/bin/opssh b/bin/opssh
index 71e5bf9f2..a4fceb6a8 100755
--- a/bin/opssh
+++ b/bin/opssh
@@ -2,7 +2,6 @@
# vim: expandtab:tabstop=4:shiftwidth=4
import argparse
-import awsutil
import traceback
import sys
import os
@@ -10,54 +9,71 @@ import re
import tempfile
import time
import subprocess
+import ConfigParser
-DEFAULT_PSSH_PAR=200
+from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
+
+DEFAULT_PSSH_PAR = 200
PSSH = '/usr/bin/pssh'
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
+CONFIG_INVENTORY_OPTION = 'inventory'
class Opssh(object):
def __init__(self):
+ self.inventory = None
+ self.host_type_aliases = {}
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
- self.aws = awsutil.AwsUtil()
+
+ # Default the config path to /etc
+ self.config_path = os.path.join(os.path.sep, 'etc', \
+ 'openshift_ansible', \
+ 'openshift_ansible.conf')
self.parse_cli_args()
+ self.parse_config_file()
+
+ self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+ def run(self):
if self.args.list_host_types:
self.aws.print_host_types()
- return
+ return 0
- if self.args.env and \
- self.args.host_type and \
- self.args.command:
- retval = self.run_pssh()
- if retval != 0:
- raise ValueError("pssh run failed")
+ if self.args.host_type is not None or \
+ self.args.env is not None:
+ return self.run_pssh()
- return
-
- # If it makes it here, we weren't able to determine what they wanted to do
- raise ValueError("Invalid combination of arguments")
+ # We weren't able to determine what they wanted to do
+ raise ArgumentError("Invalid combination of arguments")
def run_pssh(self):
"""Actually run the pssh command based off of the supplied options
"""
# Default set of options
- pssh_args = [PSSH, '-i', '-t', '0', '-p', str(self.args.par), '--user', self.args.user]
+ pssh_args = [PSSH, '-t', '0', '-p', str(self.args.par), '--user', self.args.user]
+
+ if self.args.inline:
+ pssh_args.append("--inline")
if self.args.outdir:
- pssh_args.append("--outdir='%s'" % self.args.outdir)
+ pssh_args.extend(["--outdir", self.args.outdir])
if self.args.errdir:
- pssh_args.append("--errdir='%s'" % self.args.errdir)
+ pssh_args.extend(["--errdir", self.args.errdir])
+
+ hosts = self.aws.get_host_list(host_type=self.args.host_type,
+ env=self.args.env)
- hosts = self.aws.get_host_list(self.args.host_type, self.args.env)
with tempfile.NamedTemporaryFile(prefix='opssh-', delete=True) as f:
for h in hosts:
f.write(h + os.linesep)
f.flush()
- pssh_args.extend(["-h", "%s" % f.name])
- pssh_args.append("%s" % self.args.command)
+ pssh_args.extend(["-h", f.name])
+ pssh_args.append(self.args.command)
print
print "Running: %s" % ' '.join(pssh_args)
@@ -66,6 +82,20 @@ class Opssh(object):
return None
+ def parse_config_file(self):
+ if os.path.isfile(self.config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(self.config_path)
+
+ if config.has_section(CONFIG_MAIN_SECTION) and \
+ config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+ self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
+ self.host_type_aliases = {}
+ if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
+ self.host_type_aliases[alias] = value
def parse_cli_args(self):
"""Setup the command line parser with the options we want
@@ -79,7 +109,7 @@ class Opssh(object):
parser.add_argument('-e', '--env', action="store",
help="Which environment to use")
- parser.add_argument('-t', '--host-type', action="store",
+ parser.add_argument('-t', '--host-type', action="store", default=None,
help="Which host type to use")
parser.add_argument('-c', '--command', action='store',
@@ -88,6 +118,9 @@ class Opssh(object):
parser.add_argument('--user', action='store', default='root',
help='username')
+ parser.add_argument('-i', '--inline', default=False, action='store_true',
+ help='inline aggregated output and error for each server')
+
parser.add_argument('-p', '--par', action='store', default=DEFAULT_PSSH_PAR,
help=('max number of parallel threads (default %s)' % DEFAULT_PSSH_PAR))
@@ -107,5 +140,7 @@ if __name__ == '__main__':
try:
opssh = Opssh()
- except ValueError as e:
+ exitcode = opssh.run()
+ sys.exit(exitcode)
+ except ArgumentError as e:
print "\nError: %s\n" % e.message
diff --git a/bin/oscp b/bin/oscp
index 146bbbea5..461ad0a0f 100755
--- a/bin/oscp
+++ b/bin/oscp
@@ -2,21 +2,34 @@
# vim: expandtab:tabstop=4:shiftwidth=4
import argparse
-import awsutil
import traceback
import sys
import os
import re
+import ConfigParser
+
+from openshift_ansible import awsutil
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_INVENTORY_OPTION = 'inventory'
class Oscp(object):
def __init__(self):
+ self.inventory = None
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+ # Default the config path to /etc
+ self.config_path = os.path.join(os.path.sep, 'etc', \
+ 'openshift_ansible', \
+ 'openshift_ansible.conf')
+
self.parse_cli_args()
+ self.parse_config_file()
# parse host and user
self.process_host()
- self.aws = awsutil.AwsUtil()
+ self.aws = awsutil.AwsUtil(self.inventory)
# get a dict of host inventory
if self.args.list:
@@ -38,9 +51,18 @@ class Oscp(object):
else:
self.scp()
+ def parse_config_file(self):
+ if os.path.isfile(self.config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(self.config_path)
+
+ if config.has_section(CONFIG_MAIN_SECTION) and \
+ config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+ self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
def parse_cli_args(self):
parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
- parser.add_argument('-e', '--env',
+ parser.add_argument('-e', '--env',
action="store", help="Environment where this server exists.")
parser.add_argument('-d', '--debug', default=False,
action="store_true", help="debug mode")
diff --git a/bin/ossh b/bin/ossh
index 66a4cfb5c..c16ea6eda 100755
--- a/bin/ossh
+++ b/bin/ossh
@@ -2,18 +2,31 @@
# vim: expandtab:tabstop=4:shiftwidth=4
import argparse
-import awsutil
import traceback
import sys
import os
import re
+import ConfigParser
+
+from openshift_ansible import awsutil
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_INVENTORY_OPTION = 'inventory'
class Ossh(object):
def __init__(self):
+ self.inventory = None
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+ # Default the config path to /etc
+ self.config_path = os.path.join(os.path.sep, 'etc', \
+ 'openshift_ansible', \
+ 'openshift_ansible.conf')
+
self.parse_cli_args()
+ self.parse_config_file()
- self.aws = awsutil.AwsUtil()
+ self.aws = awsutil.AwsUtil(self.inventory)
# get a dict of host inventory
if self.args.list:
@@ -37,6 +50,15 @@ class Ossh(object):
else:
self.ssh()
+ def parse_config_file(self):
+ if os.path.isfile(self.config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(self.config_path)
+
+ if config.has_section(CONFIG_MAIN_SECTION) and \
+ config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+ self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
def parse_cli_args(self):
parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
parser.add_argument('-e', '--env', action="store",
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 6a95ce6ee..1467de858 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,6 +1,7 @@
__ossh_known_hosts(){
if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])'
+ /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
fi
}
@@ -16,3 +17,23 @@ _ossh()
return 0
}
complete -F _ossh ossh oscp
+
+__opssh_known_hosts(){
+ if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ fi
+}
+
+_opssh()
+{
+ local cur prev known_hosts
+ COMPREPLY=()
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prev="${COMP_WORDS[COMP_CWORD-1]}"
+ known_hosts="$(__opssh_known_hosts)"
+ COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
+
+ return 0
+}
+complete -F _opssh opssh
+
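
The completion helpers compress the cache parsing into shell one-liners;
unrolled into ordinary Python, the new __opssh_known_hosts body is
equivalent to this sketch:

    import json
    import os

    cache = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
    hostvars = json.loads(open(cache).read())['_meta']['hostvars']
    for _dns, host in hostvars.items():
        if 'ec2_tag_host-type' in host:
            print host['ec2_tag_host-type']
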
diff --git a/cluster.sh b/cluster.sh
deleted file mode 100755
index 9c9aad4d2..000000000
--- a/cluster.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/bin/bash -eu
-
-NODES=2
-MASTERS=1
-
-# If the environment variable OO_PROVDER is defined, it used for the provider
-PROVIDER=${OO_PROVIDER:-''}
-# Otherwise, default is gce (Google Compute Engine)
-if [ "x$PROVIDER" == "x" ];then
- PROVIDER=gce
-fi
-
-UPPER_CASE_PROVIDER=$(echo $PROVIDER | tr '[:lower:]' '[:upper:]')
-
-
-# Use OO_MASTER_PLAYBOOK/OO_NODE_PLAYBOOK environment variables for playbooks if defined,
-# otherwise use openshift default values.
-MASTER_PLAYBOOK=${OO_MASTER_PLAYBOOK:-'openshift-master'}
-NODE_PLAYBOOK=${OO_NODE_PLAYBOOK:-'openshift-node'}
-
-
-# @formatter:off
-function usage {
- cat 1>&2 <<-EOT
- ${0} : [create|terminate|update|list] { ${UPPER_CASE_PROVIDER} environment tag}
-
- Supported environment tags:
- $(grep --no-messages 'SUPPORTED_ENVS.*=' ./lib/${PROVIDER}_command.rb)
- $([ $? -ne 0 ] && echo "No supported environment tags found for ${PROVIDER}")
-
- Optional arguments for create:
- [-p|--provider, -m|--masters, -n|--nodes, --master-playbook, --node-playbook]
-
- Optional arguments for terminate|update:
- [-p|--provider, --master-playbook, --node-playbook]
-EOT
-}
-# @formatter:on
-
-function create_cluster {
- ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MASTER_PLAYBOOK -c $MASTERS
-
- ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$NODE_PLAYBOOK -c $NODES
-
- update_cluster
-
- echo -e "\nCreated ${MASTERS}/${MASTER_PLAYBOOK} masters and ${NODES}/${NODE_PLAYBOOK} nodes using ${PROVIDER} provider\n"
-}
-
-function update_cluster {
- ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MASTER_PLAYBOOK
- ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-function terminate_cluster {
- ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MASTER_PLAYBOOK
- ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-[ -f ./cloud.rb ] || (echo 1>&2 'Cannot find ./cloud.rb' && exit 1)
-
-function check_argval {
- if [[ $1 == -* ]]; then
- echo "Invalid value: '$1'"
- usage
- exit 1
- fi
-}
-
-# Using GNU getopt to support both small and long formats
-OPTIONS=`getopt -o p:m:n:h --long provider:,masters:,nodes:,master-playbook:,node-playbook:,help \
- -n "$0" -- "$@"`
-eval set -- "$OPTIONS"
-
-while true; do
- case "$1" in
- -h|--help) (usage; exit 1) ; shift ;;
- -p|--provider) PROVIDER="$2" ; check_argval $2 ; shift 2 ;;
- -m|--masters) MASTERS="$2" ; check_argval $2 ; shift 2 ;;
- -n|--nodes) NODES="$2" ; check_argval $2 ; shift 2 ;;
- --master-playbook) MASTER_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
- --node-playbook) NODE_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
- --) shift ; break ;;
- *) break ;;
- esac
-done
-
-shift $((OPTIND-1))
-
-[ -z "${1:-}" ] && (usage; exit 1)
-
-case "${1}" in
- 'create')
- [ -z "${2:-}" ] && (usage; exit 1)
- ENV="${2}"
- create_cluster ;;
- 'update')
- [ -z "${2:-}" ] && (usage; exit 1)
- ENV="${2}"
- update_cluster ;;
- 'terminate')
- [ -z "${2:-}" ] && (usage; exit 1)
- ENV="${2}"
- terminate_cluster ;;
- 'list') ./cloud.rb "${PROVIDER}" list ;;
- 'help') usage; exit 0 ;;
- *)
- echo -n 1>&2 "${1} is not a supported operation";
- usage;
- exit 1 ;;
-esac
-
-exit 0
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index b57056375..097038450 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1,39 +1,57 @@
-from ansible import errors, runner
-import json
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+from ansible import errors
+from operator import itemgetter
import pdb
def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in from the filter.
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
+ '''
+ pdb.set_trace()
+ return arg
def oo_len(arg):
- ''' This returns the length of the argument
+ ''' This returns the length of the argument
Ex: "{{ hostvars | oo_len }}"
- '''
- return len(arg)
+ '''
+ return len(arg)
def get_attr(data, attribute=None):
- ''' This looks up dictionary attributes of the form a.b.c and returns the value.
+ ''' This looks up dictionary attributes of the form a.b.c and returns
+ the value.
Ex: data = {'a': {'b': {'c': 5}}}
attribute = "a.b.c"
returns 5
- '''
+ '''
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
+ ptr = data
+ for attr in attribute.split('.'):
+ ptr = ptr[attr]
- ptr = data
- for attr in attribute.split('.'):
- ptr = ptr[attr]
+ return ptr
- return ptr
+def oo_flatten(data):
+ ''' This filter plugin will flatten a list of lists
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects to flatten a List")
-def oo_collect(data, attribute=None, filters={}):
- ''' This takes a list of dict and collects all attributes specified into a list
- If filter is specified then we will include all items that match _ALL_ of filters.
+ return [item for sublist in data for item in sublist]
+
+
+def oo_collect(data, attribute=None, filters=None):
+ ''' This takes a list of dict and collects all attributes specified into a
+ list If filter is specified then we will include all items that match
+ _ALL_ of filters.
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
{'a':2, 'z': 'z'}, # True, return
{'a':3, 'z': 'z'}, # True, return
@@ -42,44 +60,137 @@ def oo_collect(data, attribute=None, filters={}):
attribute = 'a'
filters = {'z': 'z'}
returns [1, 2, 3]
- '''
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a List")
- if not issubclass(type(data), list):
- raise errors.AnsibleFilterError("|failed expects to filter on a List")
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
+ if filters is not None:
+ if not issubclass(type(filters), dict):
+ raise errors.AnsibleFilterError("|fialed expects filter to be a"
+ " dict")
+ retval = [get_attr(d, attribute) for d in data if (
+ all([d[key] == filters[key] for key in filters]))]
+ else:
+ retval = [get_attr(d, attribute) for d in data]
- if filters:
- retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
- else:
- retval = [get_attr(d, attribute) for d in data]
-
- return retval
+ return retval
def oo_select_keys(data, keys):
- ''' This returns a list, which contains the value portions for the keys
+ ''' This returns a list, which contains the value portions for the keys
Ex: data = { 'a':1, 'b':2, 'c':3 }
keys = ['a', 'c']
returns [1, 3]
- '''
-
- if not issubclass(type(data), dict):
- raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
-
- if not issubclass(type(keys), list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
-
- # Gather up the values for the list of keys passed in
- retval = [data[key] for key in keys]
-
- return retval
-
-class FilterModule (object):
- def filters(self):
- return {
- "oo_select_keys": oo_select_keys,
- "oo_collect": oo_collect,
- "oo_len": oo_len,
- "oo_pdb": oo_pdb
- }
+ '''
+
+ if not issubclass(type(data), dict):
+ raise errors.AnsibleFilterError("|failed expects to filter on a dict")
+
+ if not issubclass(type(keys), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [data[key] for key in keys]
+
+ return retval
+
+def oo_prepend_strings_in_list(data, prepend):
+ ''' This takes a list of strings and prepends a string to each item in the
+ list
+ Ex: data = ['cart', 'tree']
+ prepend = 'apple-'
+ returns ['apple-cart', 'apple-tree']
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+ if not all(isinstance(x, basestring) for x in data):
+ raise errors.AnsibleFilterError("|failed expects first param is a list"
+ " of strings")
+ retval = [prepend + s for s in data]
+ return retval
+
+def oo_ami_selector(data, image_name):
+ ''' This takes a list of amis and an image name and attempts to return
+ the latest ami.
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ if not data:
+ return None
+ else:
+ if image_name is None or not image_name.endswith('_*'):
+ ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+ return ami['ami_id']
+ else:
+ ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+ ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+ return ami['ami_id']
+
+def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
+ ''' This takes a dictionary of volume definitions and returns a valid ec2
+ volume definition based on the host_type and the values in the
+ dictionary.
+ The dictionary should look similar to this:
+ { 'master':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'gp2',
+ 'iops': 500
+ }
+ },
+ 'node':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'io1',
+ 'iops': 1000
+ },
+ 'docker':
+ { 'volume_size': 40, 'device_type': 'gp2',
+ 'iops': 500, 'ephemeral': 'true'
+ }
+ }
+ }
+ '''
+ if not issubclass(type(data), dict):
+ raise errors.AnsibleFilterError("|failed expects first param is a dict")
+ if host_type not in ['master', 'node']:
+ raise errors.AnsibleFilterError("|failed expects either master or node"
+ " host type")
+
+ root_vol = data[host_type]['root']
+ root_vol['device_name'] = '/dev/sda1'
+ root_vol['delete_on_termination'] = True
+ if root_vol['device_type'] != 'io1':
+ root_vol.pop('iops', None)
+ if host_type == 'node':
+ docker_vol = data[host_type]['docker']
+ docker_vol['device_name'] = '/dev/xvdb'
+ docker_vol['delete_on_termination'] = True
+ if docker_vol['device_type'] != 'io1':
+ docker_vol.pop('iops', None)
+ if docker_ephemeral:
+ docker_vol.pop('device_type', None)
+ docker_vol.pop('delete_on_termination', None)
+ docker_vol['ephemeral'] = 'ephemeral0'
+ return [root_vol, docker_vol]
+ return [root_vol]
+
+# disabling pylint checks for too-few-public-methods and no-self-use since we
+# need to expose a FilterModule object that has a filters method that returns
+# a mapping of filter names to methods.
+# pylint: disable=too-few-public-methods, no-self-use
+class FilterModule(object):
+ ''' FilterModule '''
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {
+ "oo_select_keys": oo_select_keys,
+ "oo_collect": oo_collect,
+ "oo_flatten": oo_flatten,
+ "oo_len": oo_len,
+ "oo_pdb": oo_pdb,
+ "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
+ "oo_ami_selector": oo_ami_selector,
+ "oo_ec2_volume_definition": oo_ec2_volume_definition
+ }
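
In playbooks these filters appear as Jinja2 pipes (the
get_zabbix_problems.yml playbook later in this diff chains oo_collect and
oo_flatten); called directly, the reworked functions behave as in this
sketch:

    data = [{'a': 1, 'z': 'z'}, {'a': 2, 'z': 'z'}, {'a': 3, 'z': 'x'}]
    print oo_collect(data, attribute='a', filters={'z': 'z'})  # [1, 2]
    print oo_flatten([[1, 2], [3]])                            # [1, 2, 3]
    print oo_prepend_strings_in_list(['cart', 'tree'], 'apple-')
    # ['apple-cart', 'apple-tree']
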
diff --git a/git/.pylintrc b/git/.pylintrc
new file mode 100644
index 000000000..2d45f867e
--- /dev/null
+++ b/git/.pylintrc
@@ -0,0 +1,390 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=no
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Deprecated. It was used to include message's id in output. Use --msg-template
+# instead.
+#include-ids=no
+
+# Deprecated. It was used to include symbolic ids of messages in output. Use
+# --msg-template instead.
+#symbols=no
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis
+ignored-modules=
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it working
+# install python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/git/parent.rb b/git/parent.rb
new file mode 100755
index 000000000..2acb127c4
--- /dev/null
+++ b/git/parent.rb
@@ -0,0 +1,45 @@
+#!/usr/bin/env ruby
+#
+#
+#
+
+if __FILE__ == $0
+  # If we aren't on the prod branch we don't need the parent check
+ branch = 'prod'
+ exit(0) if ARGV[0] !~ /#{branch}/
+ commit_id = ARGV[1]
+ %x[/usr/bin/git checkout #{branch}]
+ %x[/usr/bin/git merge #{commit_id}]
+
+ count = 0
+ #lines = %x[/usr/bin/git rev-list --left-right stg...master].split("\n")
+ lines = %x[/usr/bin/git rev-list --left-right remotes/origin/stg...#{branch}].split("\n")
+ lines.each do |commit|
+ # next if they are in stage
+ next if commit =~ /^</
+ # remove the first char '>'
+ commit = commit[1..-1]
+ # check if any remote branches contain $commit
+ results = %x[/usr/bin/git branch -q -r --contains #{commit} 2>/dev/null ]
+ # if this comes back empty, nothing contains it, we can skip it as
+ # we have probably created the merge commit here locally
+ next if results.empty?
+
+ # The results generally contain origin/pr/246/merge and origin/pr/246/head
+ # this is the pull request which would contain the commit in question.
+ #
+ # If the results do not contain origin/stg then stage does not contain
+ # the commit in question. Therefore we need to alert!
+ unless results =~ /origin\/stg/
+ puts "\nFAILED: (These commits are not in stage.)\n"
+ puts "\t#{commit}"
+ count += 1
+ end
+ end
+
+ # Exit with count of commits in #{branch} but not stg
+ exit(count)
+end
+
+__END__
+
diff --git a/git/pylint.sh b/git/pylint.sh
new file mode 100755
index 000000000..286747565
--- /dev/null
+++ b/git/pylint.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+
+OLDREV=$1
+NEWREV=$2
+TRG_BRANCH=$3
+
+PYTHON=/var/lib/jenkins/python27/bin/python
+
+/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | \
+ grep ".py$" | \
+ xargs -r -I{} ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc {}
+
+exit $?
diff --git a/git/yaml_validation.rb b/git/yaml_validation.rb
new file mode 100755
index 000000000..f5ded7a78
--- /dev/null
+++ b/git/yaml_validation.rb
@@ -0,0 +1,72 @@
+#!/usr/bin/env ruby
+#
+#
+#
+require 'yaml'
+require 'tmpdir'
+
+class YamlValidate
+ def self.yaml_file?(filename)
+ return filename.end_with?('.yaml') || filename.end_with?('.yml')
+ end
+
+ def self.short_yaml_ext?(filename)
+ return filename.end_with?(".yml")
+ end
+
+ def self.valid_yaml?(filename)
+ YAML::load_file(filename)
+
+ return true
+ end
+end
+
+class GitCommit
+ attr_accessor :oldrev, :newrev, :refname, :tmp
+ def initialize(oldrev, newrev, refname)
+ @oldrev = oldrev
+ @newrev = newrev
+ @refname = refname
+ @tmp = Dir.mktmpdir(@newrev)
+ end
+
+ def get_file_changes()
+ files = %x[/usr/bin/git diff --name-only #{@oldrev} #{@newrev} --diff-filter=ACM].split("\n")
+
+ # if files is empty we will get a full checkout. This happens on
+ # a git rm file. If there are no changes then we need to skip the archive
+ return [] if files.empty?
+
+ # We only want to take the files that changed. Archive will do that when passed
+ # the filenames. It will export these to a tmp dir
+ system("/usr/bin/git archive #{@newrev} #{files.join(" ")} | tar x -C #{@tmp}")
+ return Dir.glob("#{@tmp}/**/*").delete_if { |file| File.directory?(file) }
+ end
+end
+
+if __FILE__ == $0
+ while data = STDIN.gets
+ oldrev, newrev, refname = data.split
+ gc = GitCommit.new(oldrev, newrev, refname)
+
+ results = []
+ gc.get_file_changes().each do |file|
+ begin
+ puts "++++++ Received: #{file}"
+
+ #raise "Yaml file extensions must be .yaml not .yml" if YamlValidate.short_yaml_ext? file
+
+ # skip readme, other files, etc
+ next unless YamlValidate.yaml_file?(file)
+
+ results << YamlValidate.valid_yaml?(file)
+ rescue Exception => ex
+ puts "\n#{ex.message}\n\n"
+ results << false
+ end
+ end
+
+ #puts "RESULTS\n#{results.inspect}\n"
+ exit 1 if results.include?(false)
+ end
+end
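
The heart of the hook is small: load every changed YAML file and fail the
push if any of them fails to parse. An equivalent sketch in Python, for
comparison with the Ruby above:

    import yaml

    def valid_yaml(path):
        '''Return True if path parses as YAML, False otherwise.'''
        try:
            yaml.safe_load(open(path).read())
            return True
        except yaml.YAMLError as ex:
            print '\n%s\n' % ex
            return False
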
diff --git a/inventory/aws/ec2.ini b/inventory/aws/hosts/ec2.ini
index eaab0a410..eaab0a410 100644
--- a/inventory/aws/ec2.ini
+++ b/inventory/aws/hosts/ec2.ini
diff --git a/inventory/aws/ec2.py b/inventory/aws/hosts/ec2.py
index f231ff4c2..f231ff4c2 100755
--- a/inventory/aws/ec2.py
+++ b/inventory/aws/hosts/ec2.py
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
new file mode 100644
index 000000000..34a4396bd
--- /dev/null
+++ b/inventory/aws/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
new file mode 100644
index 000000000..98dbb4fd8
--- /dev/null
+++ b/inventory/byo/hosts
@@ -0,0 +1,34 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# host group for masters
+[masters]
+ose3-master-ansible.test.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2]-ansible.test.example.com
diff --git a/inventory/gce/gce.py b/inventory/gce/hosts/gce.py
index 3403f735e..3403f735e 100755
--- a/inventory/gce/gce.py
+++ b/inventory/gce/hosts/gce.py
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
new file mode 100644
index 000000000..34a4396bd
--- /dev/null
+++ b/inventory/gce/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
new file mode 100644
index 000000000..34a4396bd
--- /dev/null
+++ b/inventory/libvirt/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/libvirt/hosts/libvirt.ini b/inventory/libvirt/hosts/libvirt.ini
new file mode 100644
index 000000000..62ff204dd
--- /dev/null
+++ b/inventory/libvirt/hosts/libvirt.ini
@@ -0,0 +1,20 @@
+# Ansible libvirt external inventory script settings
+#
+
+[libvirt]
+
+uri = qemu:///system
+
+# API calls to libvirt can be slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+# - ansible-libvirt.cache
+# - ansible-libvirt.index
+cache_path = /tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+cache_max_age = 900
+
+
+
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
new file mode 100755
index 000000000..4652f112e
--- /dev/null
+++ b/inventory/libvirt/hosts/libvirt_generic.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python2
+
+"""
+libvirt external inventory script
+=================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
+This, more or less, allows you to keep one central database containing
+info about all of your managed instances.
+
+"""
+
+# (c) 2015, Jason DeTiberus <jdetiber@redhat.com>
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+import argparse
+import ConfigParser
+import os
+import re
+import sys
+from time import time
+import libvirt
+import xml.etree.ElementTree as ET
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
+class LibvirtInventory(object):
+
+ def __init__(self):
+ self.inventory = dict() # A list of groups and the hosts in that group
+ self.cache = dict() # Details about hosts in the inventory
+
+ # Read settings and parse CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ if self.args.host:
+ print self.json_format_dict(self.get_host_info(), self.args.pretty)
+ elif self.args.list:
+ print self.json_format_dict(self.get_inventory(), self.args.pretty)
+ else: # default action with no options
+ print self.json_format_dict(self.get_inventory(), self.args.pretty)
+
+ def read_settings(self):
+ config = ConfigParser.SafeConfigParser()
+ config.read(
+ os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini'
+ )
+ self.libvirt_uri = config.get('libvirt', 'uri')
+
+ def parse_cli_args(self):
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on libvirt'
+ )
+ parser.add_argument(
+ '--list',
+ action='store_true',
+ default=True,
+ help='List instances (default: True)'
+ )
+ parser.add_argument(
+ '--host',
+ action='store',
+ help='Get all the variables about a specific instance'
+ )
+ parser.add_argument(
+ '--pretty',
+ action='store_true',
+ default=False,
+ help='Pretty format (default: False)'
+ )
+ self.args = parser.parse_args()
+
+ def get_host_info(self):
+ inventory = self.get_inventory()
+ if self.args.host in inventory['_meta']['hostvars']:
+ return inventory['_meta']['hostvars'][self.args.host]
+
+ def get_inventory(self):
+ inventory = dict(_meta=dict(hostvars=dict()))
+
+ conn = libvirt.openReadOnly(self.libvirt_uri)
+ if conn is None:
+ print "Failed to open connection to %s" % libvirt_uri
+ sys.exit(1)
+
+ domains = conn.listAllDomains()
+ if domains is None:
+ print "Failed to list domains for connection %s" % libvirt_uri
+ sys.exit(1)
+
+ arp_entries = self.parse_arp_entries()
+
+ for domain in domains:
+ hostvars = dict(libvirt_name=domain.name(),
+ libvirt_id=domain.ID(),
+ libvirt_uuid=domain.UUIDString())
+ domain_name = domain.name()
+
+ # TODO: add support for guests that are not in a running state
+ state, _ = domain.state()
+ # 2 is the state for a running guest
+ if state != 1:
+ continue
+
+ hostvars['libvirt_status'] = 'running'
+
+ root = ET.fromstring(domain.XMLDesc())
+ ns = {'ansible': 'https://github.com/ansible/ansible'}
+ for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ns):
+ tag = tag_elem.text
+ self.push(inventory, "tag_%s" % tag, domain_name)
+ self.push(hostvars, 'libvirt_tags', tag)
+
+ # TODO: support more than one network interface, also support
+ # interface types other than 'network'
+ interface = root.find("./devices/interface[@type='network']")
+ if interface is not None:
+ mac_elem = interface.find('mac')
+ if mac_elem is not None:
+ mac = mac_elem.get('address')
+ if mac in arp_entries:
+ ip_address = arp_entries[mac]['ip_address']
+ hostvars['ansible_ssh_host'] = ip_address
+ hostvars['libvirt_ip_address'] = ip_address
+
+ inventory['_meta']['hostvars'][domain_name] = hostvars
+
+ return inventory
+
+ def parse_arp_entries(self):
+ arp_entries = dict()
+ with open('/proc/net/arp', 'r') as f:
+ # throw away the header
+ f.readline()
+
+ for line in f:
+ ip_address, _, _, mac, _, device = line.strip().split()
+ arp_entries[mac] = dict(ip_address=ip_address, device=device)
+
+ return arp_entries
+
+ def push(self, my_dict, key, element):
+ if key in my_dict:
+ my_dict[key].append(element)
+ else:
+ my_dict[key] = [element]
+
+ def json_format_dict(self, data, pretty=False):
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+LibvirtInventory()
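
For a single running guest tagged "node", the script's --list output takes
roughly this shape (shown as the Python dict before JSON serialization; the
name and addresses are illustrative):

    inventory = {
        'tag_node': ['vm1'],
        '_meta': {'hostvars': {'vm1': {
            'libvirt_name': 'vm1',          # plus libvirt_id / libvirt_uuid
            'libvirt_status': 'running',
            'libvirt_tags': ['node'],
            'ansible_ssh_host': '192.168.122.10',
            'libvirt_ip_address': '192.168.122.10',
        }}},
    }
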
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index 5dee7972b..b839a33ea 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -1,37 +1,58 @@
#!/usr/bin/env python2
+'''
+ Fetch and combine multiple ec2 account settings into a single
+ json hash.
+'''
# vim: expandtab:tabstop=4:shiftwidth=4
from time import time
import argparse
import yaml
import os
-import sys
-import pdb
import subprocess
import json
-import pprint
+CONFIG_FILE_NAME = 'multi_ec2.yaml'
+
class MultiEc2(object):
+ '''
+ MultiEc2 class:
+ Opens a yaml config file and reads aws credentials.
+ Stores a json hash of resources in result.
+ '''
def __init__(self):
+ self.args = None
self.config = None
self.all_ec2_results = {}
self.result = {}
self.cache_path = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
- self.config_file = os.path.join(self.file_path,"multi_ec2.yaml")
+
+ same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
+ etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
+
+ # Prefer a file in the same directory, fall back to a file in etc
+ if os.path.isfile(same_dir_config_file):
+ self.config_file = same_dir_config_file
+ elif os.path.isfile(etc_dir_config_file):
+ self.config_file = etc_dir_config_file
+ else:
+ self.config_file = None # expect env vars
+
self.parse_cli_args()
# load yaml
- if os.path.isfile(self.config_file):
+ if self.config_file and os.path.isfile(self.config_file):
self.config = self.load_yaml_config()
- elif os.environ.has_key("AWS_ACCESS_KEY_ID") and os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+ elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
+ os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
self.config = {}
self.config['accounts'] = [
{
'name': 'default',
- 'provider': 'aws/ec2.py',
+ 'provider': 'aws/hosts/ec2.py',
'env_vars': {
'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
@@ -43,13 +64,9 @@ class MultiEc2(object):
else:
raise RuntimeError("Could not find valid ec2 credentials in the environment.")
- if self.args.cache_only:
- # get data from disk
- result = self.get_inventory_from_cache()
-
- if not result:
- self.get_inventory()
- self.write_to_cache()
+ if self.args.refresh_cache:
+ self.get_inventory()
+ self.write_to_cache()
# if its a host query, fetch and do not cache
elif self.args.host:
self.get_inventory()
@@ -61,7 +78,7 @@ class MultiEc2(object):
# get data from disk
self.get_inventory_from_cache()
- def load_yaml_config(self,conf_file=None):
+ def load_yaml_config(self, conf_file=None):
"""Load a yaml config file with credentials to query the
respective cloud for inventory.
"""
@@ -75,7 +92,7 @@ class MultiEc2(object):
return config
- def get_provider_tags(self,provider, env={}):
+ def get_provider_tags(self, provider, env=None):
"""Call <provider> and query all of the tags that are usuable
by ansible. If environment is empty use the default env.
"""
@@ -140,7 +157,8 @@ class MultiEc2(object):
self.all_ec2_results[result['name']] = json.loads(result['out'])
values = self.all_ec2_results.values()
values.insert(0, self.result)
- [MultiEc2.merge_destructively(self.result, x) for x in values]
+ for result in values:
+ MultiEc2.merge_destructively(self.result, result)
else:
# For any 0 result, return it
count = 0
@@ -152,30 +170,30 @@ class MultiEc2(object):
raise RuntimeError("Found > 1 results for --host %s. \
This is an invalid state." % self.args.host)
@staticmethod
- def merge_destructively(a, b):
- "merges b into a"
- for key in b:
- if key in a:
- if isinstance(a[key], dict) and isinstance(b[key], dict):
- MultiEc2.merge_destructively(a[key], b[key])
- elif a[key] == b[key]:
+ def merge_destructively(input_a, input_b):
+ "merges b into input_a"
+ for key in input_b:
+ if key in input_a:
+ if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
+ MultiEc2.merge_destructively(input_a[key], input_b[key])
+ elif input_a[key] == input_b[key]:
pass # same leaf value
# both lists so add each element in b to a if it does ! exist
- elif isinstance(a[key], list) and isinstance(b[key],list):
- for x in b[key]:
- if x not in a[key]:
- a[key].append(x)
+ elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
+ for result in input_b[key]:
+ if result not in input_a[key]:
+                            input_a[key].append(result)
# a is a list and not b
- elif isinstance(a[key], list):
- if b[key] not in a[key]:
- a[key].append(b[key])
- elif isinstance(b[key], list):
- a[key] = [a[key]] + [k for k in b[key] if k != a[key]]
+ elif isinstance(input_a[key], list):
+ if input_b[key] not in input_a[key]:
+ input_a[key].append(input_b[key])
+ elif isinstance(input_b[key], list):
+ input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
else:
- a[key] = [a[key],b[key]]
+ input_a[key] = [input_a[key], input_b[key]]
else:
- a[key] = b[key]
- return a
+ input_a[key] = input_b[key]
+ return input_a
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
@@ -191,19 +209,20 @@ class MultiEc2(object):
def parse_cli_args(self):
''' Command line argument processing '''
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on a provider')
- parser.add_argument('--cache-only', action='store_true', default=False,
- help='Fetch cached only instances (default: False)')
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on a provider')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Refresh the inventory cache (default: False)')
parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
+ help='List instances (default: True)')
parser.add_argument('--host', action='store', default=False,
- help='Get all the variables about a specific instance')
+ help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def write_to_cache(self):
''' Writes data in JSON format to a file '''
- json_data = self.json_format_dict(self.result, True)
+ json_data = MultiEc2.json_format_dict(self.result, True)
with open(self.cache_path, 'w') as cache:
cache.write(json_data)
@@ -219,7 +238,8 @@ class MultiEc2(object):
return True
- def json_format_dict(self, data, pretty=False):
+ @classmethod
+ def json_format_dict(cls, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
@@ -229,9 +249,9 @@ class MultiEc2(object):
return json.dumps(data)
def result_str(self):
+ '''Return cache string stored in self.result'''
return self.json_format_dict(self.result, True)
if __name__ == "__main__":
- mi = MultiEc2()
- print mi.result_str()
+ print MultiEc2().result_str()
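
The renamed merge_destructively still combines per-account results
recursively: nested dicts merge, lists union, and conflicting scalars
collapse into a list. A quick sketch of that behaviour:

    a = {'x': {'k': 1}, 'l': [1], 's': 1}
    b = {'x': {'j': 2}, 'l': [2], 's': 2, 'v': 3}
    MultiEc2.merge_destructively(a, b)
    # a == {'x': {'k': 1, 'j': 2}, 'l': [1, 2], 's': [1, 2], 'v': 3}
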
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
index 0bd505816..91e7c7970 100644
--- a/inventory/multi_ec2.yaml.example
+++ b/inventory/multi_ec2.yaml.example
@@ -1,13 +1,13 @@
# multi ec2 inventory configs
accounts:
- name: aws1
- provider: aws/ec2.py
+ provider: aws/hosts/ec2.py
env_vars:
AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- name: aws2
- provider: aws/ec2.py
+ provider: aws/hosts/ec2.py
env_vars:
AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
new file mode 100644
index 000000000..8267e16f6
--- /dev/null
+++ b/inventory/openshift-ansible-inventory.spec
@@ -0,0 +1,50 @@
+Summary: OpenShift Ansible Inventories
+Name: openshift-ansible-inventory
+Version: 0.0.2
+Release: 1%{?dist}
+License: ASL 2.0
+URL: https://github.com/openshift/openshift-ansible
+Source0: %{name}-%{version}.tar.gz
+Requires: python2
+BuildRequires: python2-devel
+BuildArch: noarch
+
+%description
+Ansible Inventories used with the openshift-ansible scripts and playbooks.
+
+%prep
+%setup -q
+
+%build
+
+%install
+mkdir -p %{buildroot}/etc/ansible
+mkdir -p %{buildroot}/usr/share/ansible/inventory
+mkdir -p %{buildroot}/usr/share/ansible/inventory/aws
+mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
+
+cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
+cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p aws/ec2.py aws/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
+cp -p gce/gce.py %{buildroot}/usr/share/ansible/inventory/gce
+
+%files
+%config(noreplace) /etc/ansible/*
+%dir /usr/share/ansible/inventory
+/usr/share/ansible/inventory/multi_ec2.py*
+/usr/share/ansible/inventory/aws/ec2.py*
+%config(noreplace) /usr/share/ansible/inventory/aws/ec2.ini
+/usr/share/ansible/inventory/gce/gce.py*
+
+%changelog
+* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
+- added the ability to have a config file in /etc/openshift_ansible to
+ multi_ec2.py. (twiest@redhat.com)
+- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
+- gce inventory/playbook updates for node registration changes
+ (jdetiber@redhat.com)
+- Various fixes (jdetiber@redhat.com)
+
+* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
+- new package built with tito
+
diff --git a/playbooks/adhoc/noc/filter_plugins b/playbooks/adhoc/noc/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/adhoc/noc/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
new file mode 100644
index 000000000..02bffc1d2
--- /dev/null
+++ b/playbooks/adhoc/noc/get_zabbix_problems.yml
@@ -0,0 +1,41 @@
+---
+- name: 'Get current hosts who have triggers that are alerting by trigger description'
+ hosts: localhost
+ gather_facts: no
+ roles:
+ - os_zabbix
+ post_tasks:
+ - assert:
+ that: oo_desc is defined
+
+ - zbxapi:
+ server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+ zbx_class: Trigger
+ action: get
+ params:
+ only_true: true
+ output: extend
+ selectHosts: extend
+ searchWildCardsEnabled: 1
+ search:
+ description: "{{ oo_desc }}"
+ register: problems
+
+ - debug: var=problems
+
+ - set_fact:
+ problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}"
+
+ - debug: var=problem_hosts
+
+ - add_host:
+ name: "{{ item }}"
+ groups: problem_hosts_group
+ with_items: problem_hosts
+
+- name: "Run on problem hosts"
+ hosts: problem_hosts_group
+ gather_facts: no
+ tasks:
+ - command: "{{ oo_cmd }}"
+ when: oo_cmd is defined
diff --git a/playbooks/adhoc/noc/roles b/playbooks/adhoc/noc/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/adhoc/noc/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
index 4c29fa833..56235bc8a 100644
--- a/playbooks/aws/ansible-tower/launch.yml
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -6,7 +6,7 @@
vars:
inst_region: us-east-1
- rhel7_ami: ami-a24e30ca
+ rhel7_ami: ami-906240f8
user_data_file: user_data.txt
vars_files:
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
new file mode 100644
index 000000000..b8961704e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -0,0 +1,36 @@
+---
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/filter_plugins b/playbooks/aws/openshift-cluster/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
new file mode 100644
index 000000000..3eb5496e4
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -0,0 +1,30 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
+ tasks:
+ - fail:
+ msg: Deployment type not supported for aws provider yet
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
+
+- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
new file mode 100644
index 000000000..29e594a65
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#pylint: skip-file
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_find
+version_added: 2.0
+short_description: Searches for AMIs to obtain the AMI ID and other information
+description:
+ - Returns list of matching AMIs with AMI ID, along with other useful information
+ - Can search AMIs with different owners
+ - Can search by matching tag(s), by AMI name and/or other criteria
+ - Results can be sorted and sliced
+author: Tom Bamford
+notes:
+ - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
+ - See the example below for a suggestion of how to search by distro/release.
+options:
+ region:
+ description:
+ - The AWS region to use.
+ required: true
+ aliases: [ 'aws_region', 'ec2_region' ]
+ owner:
+ description:
+ - Search AMIs owned by the specified owner
+ - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
+ - If not specified, all EC2 AMIs in the specified region will be searched.
+ - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
+ required: false
+ default: null
+ ami_id:
+ description:
+ - An AMI ID to match.
+ default: null
+ required: false
+ ami_tags:
+ description:
+ - A hash/dictionary of tags to match for the AMI.
+ default: null
+ required: false
+ architecture:
+ description:
+ - An architecture type to match (e.g. x86_64).
+ default: null
+ required: false
+ hypervisor:
+ description:
+      - A hypervisor type to match (e.g. xen).
+ default: null
+ required: false
+ is_public:
+ description:
+ - Whether or not the image(s) are public.
+ choices: ['yes', 'no']
+ default: null
+ required: false
+ name:
+ description:
+ - An AMI name to match.
+ default: null
+ required: false
+ platform:
+ description:
+ - Platform type to match.
+ default: null
+ required: false
+ sort:
+ description:
+      - Optional attribute with which to sort the results.
+ - If specifying 'tag', the 'tag_name' parameter is required.
+ choices: ['name', 'description', 'tag']
+ default: null
+ required: false
+ sort_tag:
+ description:
+ - Tag name with which to sort results.
+ - Required when specifying 'sort=tag'.
+ default: null
+ required: false
+ sort_order:
+ description:
+ - Order in which to sort results.
+ - Only used when the 'sort' parameter is specified.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ required: false
+ sort_start:
+ description:
+ - Which result to start with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+ sort_end:
+ description:
+ - Which result to end with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+ state:
+ description:
+ - AMI state to match.
+ default: 'available'
+ required: false
+ virtualization_type:
+ description:
+ - Virtualization type to match (e.g. hvm).
+ default: null
+ required: false
+ no_result_action:
+ description:
+ - What to do when no results are found.
+ - "'success' reports success and returns an empty array"
+ - "'fail' causes the module to report failure"
+ choices: ['success', 'fail']
+ default: 'success'
+ required: false
+requirements:
+ - boto
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the AMI tagged "project:website"
+- ec2_ami_find:
+ owner: self
+ ami_tags:
+ project: website
+ no_result_action: fail
+ register: ami_find
+
+# Search for the latest Ubuntu 14.04 AMI
+- ec2_ami_find:
+ name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
+ owner: 099720109477
+ sort: name
+ sort_order: descending
+ sort_end: 1
+ register: ami_find
+
+# Launch an EC2 instance
+- ec2:
+ image: "{{ ami_find.results[0].ami_id }}"
+ instance_type: m3.medium
+ key_name: mykey
+ wait: yes
+'''
+
+try:
+ import boto.ec2
+ HAS_BOTO=True
+except ImportError:
+ HAS_BOTO=False
+
+import json
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ region = dict(required=True,
+ aliases = ['aws_region', 'ec2_region']),
+ owner = dict(required=False, default=None),
+ ami_id = dict(required=False),
+ ami_tags = dict(required=False, type='dict',
+ aliases = ['search_tags', 'image_tags']),
+ architecture = dict(required=False),
+ hypervisor = dict(required=False),
+ is_public = dict(required=False),
+ name = dict(required=False),
+ platform = dict(required=False),
+ sort = dict(required=False, default=None,
+ choices=['name', 'description', 'tag']),
+ sort_tag = dict(required=False),
+ sort_order = dict(required=False, default='ascending',
+ choices=['ascending', 'descending']),
+ sort_start = dict(required=False),
+ sort_end = dict(required=False),
+ state = dict(required=False, default='available'),
+ virtualization_type = dict(required=False),
+ no_result_action = dict(required=False, default='success',
+ choices = ['success', 'fail']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module, install via pip or your package manager')
+
+ ami_id = module.params.get('ami_id')
+ ami_tags = module.params.get('ami_tags')
+ architecture = module.params.get('architecture')
+ hypervisor = module.params.get('hypervisor')
+ is_public = module.params.get('is_public')
+ name = module.params.get('name')
+ owner = module.params.get('owner')
+ platform = module.params.get('platform')
+ sort = module.params.get('sort')
+ sort_tag = module.params.get('sort_tag')
+ sort_order = module.params.get('sort_order')
+ sort_start = module.params.get('sort_start')
+ sort_end = module.params.get('sort_end')
+ state = module.params.get('state')
+ virtualization_type = module.params.get('virtualization_type')
+ no_result_action = module.params.get('no_result_action')
+
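+ # Build the EC2 image filter from whichever optional parameters were
+ # supplied; 'state' is always included and defaults to 'available'.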
+ filter = {'state': state}
+
+ if ami_id:
+ filter['image_id'] = ami_id
+ if ami_tags:
+ for tag in ami_tags:
+ filter['tag:'+tag] = ami_tags[tag]
+ if architecture:
+ filter['architecture'] = architecture
+ if hypervisor:
+ filter['hypervisor'] = hypervisor
+ if is_public:
+ filter['is_public'] = is_public
+ if name:
+ filter['name'] = name
+ if platform:
+ filter['platform'] = platform
+ if virtualization_type:
+ filter['virtualization_type'] = virtualization_type
+
+ ec2 = ec2_connect(module)
+
+ images_result = ec2.get_all_images(owners=owner, filters=filter)
+
+ if no_result_action == 'fail' and len(images_result) == 0:
+ module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
+
+ results = []
+ for image in images_result:
+ data = {
+ 'ami_id': image.id,
+ 'architecture': image.architecture,
+ 'description': image.description,
+ 'is_public': image.is_public,
+ 'name': image.name,
+ 'owner_id': image.owner_id,
+ 'platform': image.platform,
+ 'root_device_name': image.root_device_name,
+ 'root_device_type': image.root_device_type,
+ 'state': image.state,
+ 'tags': image.tags,
+ 'virtualization_type': image.virtualization_type,
+ }
+
+ if image.kernel_id:
+ data['kernel_id'] = image.kernel_id
+ if image.ramdisk_id:
+ data['ramdisk_id'] = image.ramdisk_id
+
+ results.append(data)
+
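+ # Sort by an arbitrary tag value or by a top-level result attribute;
+ # 'descending' simply reverses the comparison.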
+ if sort == 'tag':
+ if not sort_tag:
+ module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
+ results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
+ elif sort:
+ results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+
+ try:
+ if sort and sort_start and sort_end:
+ results = results[int(sort_start):int(sort_end)]
+ elif sort and sort_start:
+ results = results[int(sort_start):]
+ elif sort and sort_end:
+ results = results[:int(sort_end)]
+ except (TypeError, ValueError):
+ module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
+
+ module.exit_json(results=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
+
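
The sort/slice options documented above ("Corresponds to Python slice notation") reduce to ordinary list sorting and slicing. A minimal standalone sketch of that behaviour, with made-up image dicts shaped like the module's result entries:

    # Hypothetical results, shaped like the per-image dicts built above.
    results = [
        {'ami_id': 'ami-1', 'name': 'app-2015-01', 'tags': {'build': '7'}},
        {'ami_id': 'ami-2', 'name': 'app-2015-03', 'tags': {'build': '9'}},
        {'ami_id': 'ami-3', 'name': 'app-2015-02', 'tags': {'build': '8'}},
    ]

    def sort_and_slice(results, sort, sort_order='ascending',
                       sort_tag=None, sort_start=None, sort_end=None):
        key = (lambda e: e['tags'][sort_tag]) if sort == 'tag' else (lambda e: e[sort])
        ordered = sorted(results, key=key, reverse=(sort_order == 'descending'))
        # sort_start/sort_end are plain Python slice bounds.
        return ordered[slice(int(sort_start) if sort_start else None,
                             int(sort_end) if sort_end else None)]

    # Mirrors the "latest Ubuntu" example: sort by name, descending, keep one.
    print(sort_and_slice(results, sort='name', sort_order='descending', sort_end=1))
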
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
new file mode 100644
index 000000000..04fcdc0a1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -0,0 +1,24 @@
+---
+- name: Generate oo_list_hosts group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env_{{ cluster_id }}
+ when: cluster_id != ''
+ - set_fact: scratch_group=all
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+
+- name: List Hosts
+ hosts: oo_list_hosts
+ gather_facts: no
+ tasks:
+ - debug:
+ msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"
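
The group selection above (the tag group when cluster_id is set, everything otherwise, always minus localhost) is this in plain Python; the group names are illustrative:

    # Illustrative inventory groups, as the dynamic inventory would report them.
    groups = {
        'all': ['localhost', 'master-1', 'node-1'],
        'tag_env_demo': ['master-1', 'node-1'],
    }

    def list_hosts(groups, cluster_id):
        scratch_group = 'tag_env_%s' % cluster_id if cluster_id else 'all'
        return [h for h in groups.get(scratch_group, []) if h != 'localhost']

    print(list_hosts(groups, 'demo'))  # ['master-1', 'node-1']
    print(list_hosts(groups, ''))      # ['master-1', 'node-1']
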
diff --git a/playbooks/aws/openshift-cluster/roles b/playbooks/aws/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..666a8d1fb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,132 @@
+---
+- set_fact:
+ created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+ docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
+ env: "{{ cluster }}"
+ env_host_type: "{{ cluster }}-openshift-{{ type }}"
+ host_type: "{{ type }}"
+
+- set_fact:
+ ec2_region: "{{ lookup('env', 'ec2_region')
+ | default(deployment_vars[deployment_type].region, true) }}"
+ when: ec2_region is not defined
+- set_fact:
+ ec2_image_name: "{{ lookup('env', 'ec2_image_name')
+ | default(deployment_vars[deployment_type].image_name, true) }}"
+ when: ec2_image_name is not defined and ec2_image is not defined
+- set_fact:
+ ec2_image: "{{ lookup('env', 'ec2_image')
+ | default(deployment_vars[deployment_type].image, true) }}"
+ when: ec2_image is not defined and not ec2_image_name
+- set_fact:
+ ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+ | default(deployment_vars[deployment_type].type, true) }}"
+ when: ec2_instance_type is not defined
+- set_fact:
+ ec2_keypair: "{{ lookup('env', 'ec2_keypair')
+ | default(deployment_vars[deployment_type].keypair, true) }}"
+ when: ec2_keypair is not defined
+- set_fact:
+ ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
+ | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
+ when: ec2_vpc_subnet is not defined
+- set_fact:
+ ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
+ | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
+ when: ec2_assign_public_ip is not defined
+- set_fact:
+ ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: ec2_security_groups is not defined
+
+- name: Find amis for deployment_type
+ ec2_ami_find:
+ region: "{{ ec2_region }}"
+ ami_id: "{{ ec2_image | default(omit, true) }}"
+ name: "{{ ec2_image_name | default(omit, true) }}"
+ register: ami_result
+
+- fail: msg="Could not find requested ami"
+ when: not ami_result.results
+
+- set_fact:
+ latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
+ user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}"
+ volume_defs:
+ master:
+ root:
+ volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
+ device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
+ node:
+ root:
+ volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+ device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
+ docker:
+ volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
+ device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
+
+- set_fact:
+ volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"
+
+- name: Launch instance(s)
+ ec2:
+ state: present
+ region: "{{ ec2_region }}"
+ keypair: "{{ ec2_keypair }}"
+ group: "{{ ec2_security_groups }}"
+ instance_type: "{{ ec2_instance_type }}"
+ image: "{{ latest_ami }}"
+ count: "{{ instances | oo_len }}"
+ vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
+ assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
+ user_data: "{{ user_data }}"
+ wait: yes
+ instance_tags:
+ created-by: "{{ created_by }}"
+ env: "{{ env }}"
+ host-type: "{{ host_type }}"
+ env-host-type: "{{ env_host_type }}"
+ volumes: "{{ volumes }}"
+ register: ec2
+
+- name: Add Name tag to instances
+ ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
+ with_together:
+ - instances
+ - ec2.instances
+ args:
+ tags:
+ Name: "{{ item.0 }}"
+
+- set_fact:
+ instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+
+- name: Add new instances groups and variables
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: "{{ instance_groups }}"
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
+ with_together:
+ - instances
+ - ec2.instances
+
+- name: Wait for ssh
+ wait_for: "port=22 host={{ item.dns_name }}"
+ with_items: ec2.instances
+
+- name: Wait for user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 10
+ with_together:
+ - instances
+ - ec2.instances
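
Each ec2_* fact above resolves in the same order: keep a value that is already defined, otherwise take the environment override, otherwise fall back to the deployment_vars default. A sketch of that chain, with names illustrative (the env lookup returns an empty string when unset, which default(..., true) treats as absent):

    import os

    # Hypothetical per-deployment defaults, shaped like vars.yml.
    deployment_vars = {'origin': {'region': 'us-east-1', 'keypair': 'libra'}}

    def resolve(name, deployment_type, current=None):
        if current is not None:          # an already-defined ec2_* var wins
            return current
        env = os.environ.get('ec2_' + name, '')
        return env or deployment_vars[deployment_type][name]

    print(resolve('region', 'origin'))                # $ec2_region, else us-east-1
    print(resolve('keypair', 'origin', 'mykeypair'))  # explicit value wins
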
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
new file mode 100644
index 000000000..7dbc8f552
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -0,0 +1,29 @@
+#cloud-config
+yum_repos:
+ jdetiber-copr:
+ name: Copr repo for origin owned by jdetiber
+ baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/
+ skip_if_unavailable: true
+ gpgcheck: true
+ gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg
+ enabled: true
+
+packages:
+- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8
+- docker-storage-setup
+
+mounts:
+- [ xvdb ]
+- [ ephemeral0 ]
+
+write_files:
+- content: |
+ DEVS=/dev/xvdb
+ VG=docker_vg
+ path: /etc/sysconfig/docker-storage-setup
+ owner: root:root
+ permissions: '0644'
+
+runcmd:
+- systemctl daemon-reload
+- systemctl enable lvm2-lvmetad.service docker-storage-setup.service
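
cloud-init only honours this file if the first line is exactly #cloud-config and the rest parses as YAML; a malformed file is ignored silently. A quick local sanity check (the path and the use of PyYAML are assumptions, and this particular template contains no Jinja expressions, so it can be parsed unrendered):

    import yaml  # PyYAML

    path = 'playbooks/aws/openshift-cluster/templates/user_data.j2'
    with open(path) as f:
        text = f.read()

    assert text.startswith('#cloud-config')
    data = yaml.safe_load(text)
    print(sorted(data))  # ['mounts', 'packages', 'runcmd', 'write_files', 'yum_repos']
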
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
new file mode 100644
index 000000000..617d0d456
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -0,0 +1,16 @@
+---
+- name: Terminate instance(s)
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env_{{ cluster_id }}
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
new file mode 100644
index 000000000..5e7ab4e58
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml
diff --git a/playbooks/aws/openshift-cluster/vars.defaults.yml b/playbooks/aws/openshift-cluster/vars.defaults.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.defaults.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
new file mode 100644
index 000000000..07e453f89
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -0,0 +1,38 @@
+---
+deployment_vars:
+ origin:
+ # fedora, since centos requires marketplace
+ image: ami-acd999c4
+ image_name:
+ region: us-east-1
+ ssh_user: fedora
+ sudo: yes
+ keypair: libra
+ type: m3.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
+ online:
+ # private ami
+ image: ami-7a9e9812
+ image_name: openshift-rhel7_*
+ region: us-east-1
+ ssh_user: root
+ sudo: no
+ keypair: libra
+ type: m3.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
+ enterprise:
+ # rhel-7.1, requires cloud access subscription
+ image: ami-10663b78
+ image_name:
+ region: us-east-1
+ ssh_user: ec2-user
+ sudo: yes
+ keypair: libra
+ type: m3.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
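
Each deployment type pins a fixed image, an image_name pattern, or both; the launch task hands them to ec2_ami_find with default(omit, true), so empty values are dropped. Roughly, and purely as illustration:

    def ami_find_args(dv):
        # Empty/None values are omitted, matching default(omit, true).
        args = {}
        if dv.get('image'):
            args['ami_id'] = dv['image']
        if dv.get('image_name'):
            args['name'] = dv['image_name']
        return args

    print(ami_find_args({'image': 'ami-acd999c4', 'image_name': None}))
    print(ami_find_args({'image': 'ami-7a9e9812',
                         'image_name': 'openshift-rhel7_*'}))
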
diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml
index 454cd6f24..37ab4fbe6 100644
--- a/playbooks/aws/openshift-master/config.yml
+++ b/playbooks/aws/openshift-master/config.yml
@@ -1,42 +1,19 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_masters_to_config host group
hosts: localhost
gather_facts: no
tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_hosts_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
-- name: "Gather facts for nodes in {{ oo_env }}"
- hosts: "tag_env-host-type_{{ oo_env }}-openshift-node"
- connection: ssh
- user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Setting openshift_node_ips fact on localhost
- set_fact:
- openshift_node_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-node'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined
-
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
- vars_files:
- - vars.yml
- roles:
- - repos
- - {
- role: openshift_master,
- openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
- openshift_env: "{{ oo_env }}"
- openshift_public_ip: "{{ ec2_ip_address }}"
- }
- - pods
- - os_env_extras
+- include: ../../common/openshift-master/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml
index a889b93be..6b3751682 100644
--- a/playbooks/aws/openshift-master/launch.yml
+++ b/playbooks/aws/openshift-master/launch.yml
@@ -4,14 +4,12 @@
connection: local
gather_facts: no
+# TODO: modify atomic_ami based on deployment_type
vars:
inst_region: us-east-1
atomic_ami: ami-86781fee
user_data_file: user_data.txt
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
ec2:
@@ -40,21 +38,24 @@
Name: "{{ item.0 }}"
- name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
with_items: ec2.instances
args:
tags: "{{ oo_new_inst_tags }}"
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_masters_to_config
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ groupname: oo_masters_to_config
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
with_together:
- oo_new_inst_names
- ec2.instances
- - debug: var=ec2
-
- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
+ wait_for: port=22 host={{ item.dns_name }}
with_items: ec2.instances
- name: Wait for root user setup
diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml
new file mode 100644
index 000000000..07d9961bc
--- /dev/null
+++ b/playbooks/aws/openshift-master/terminate.yml
@@ -0,0 +1,2 @@
+---
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml
deleted file mode 100644
index fb5f4ea42..000000000
--- a/playbooks/aws/openshift-master/vars.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_debug_level: 4
diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml
index 9662168c4..fc9b397b4 100644
--- a/playbooks/aws/openshift-node/config.yml
+++ b/playbooks/aws/openshift-node/config.yml
@@ -1,49 +1,25 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_nodes_to_config and oo_first_master host groups
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_hosts_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: root
-- name: "Gather facts for masters in {{ oo_env }}"
- hosts: "tag_env-host-type_{{ oo_env }}-openshift-master"
- connection: ssh
- user: root
-- name: "Set OO sepcific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Setting openshift_master_ips fact on localhost
- set_fact:
- openshift_master_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-master'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type_' + oo_env + '-openshift-master'] is defined
- - name: Setting openshift_master_public_ips fact on localhost
- set_fact:
- openshift_master_public_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
- | oo_collect(attribute='ec2_ip_address') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
- vars_files:
- - vars.yml
- roles:
- - repos
- - docker
- - {
- role: openshift_node,
- openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
- openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
- openshift_env: "{{ oo_env }}"
- openshift_public_ip: "{{ ec2_ip_address }}"
- }
- - os_env_extras
+- include: ../../common/openshift-node/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml
index a889b93be..36aee14ff 100644
--- a/playbooks/aws/openshift-node/launch.yml
+++ b/playbooks/aws/openshift-node/launch.yml
@@ -4,14 +4,12 @@
connection: local
gather_facts: no
+# TODO: modify atomic_ami based on deployment_type
vars:
inst_region: us-east-1
atomic_ami: ami-86781fee
user_data_file: user_data.txt
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
ec2:
@@ -27,11 +25,13 @@
register: ec2
- name: Add new instances public IPs to the atomic proxy host group
- add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: new_ec2_instances
with_items: ec2.instances
- name: Add Name and environment tags to instances
- ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present
with_together:
- oo_new_inst_names
- ec2.instances
@@ -40,21 +40,24 @@
Name: "{{ item.0 }}"
- name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
with_items: ec2.instances
args:
tags: "{{ oo_new_inst_tags }}"
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_nodes_to_config
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ groupname: oo_nodes_to_config
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
with_together:
- oo_new_inst_names
- ec2.instances
- - debug: var=ec2
-
- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
+ wait_for: port=22 host={{ item.dns_name }}
with_items: ec2.instances
- name: Wait for root user setup
diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml
new file mode 100644
index 000000000..07d9961bc
--- /dev/null
+++ b/playbooks/aws/openshift-node/terminate.yml
@@ -0,0 +1,2 @@
+---
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-node/vars.yml b/playbooks/aws/openshift-node/vars.yml
deleted file mode 100644
index fb5f4ea42..000000000
--- a/playbooks/aws/openshift-node/vars.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_debug_level: 4
diff --git a/playbooks/aws/terminate.yml b/playbooks/aws/terminate.yml
new file mode 100644
index 000000000..e9767b260
--- /dev/null
+++ b/playbooks/aws/terminate.yml
@@ -0,0 +1,64 @@
+---
+- name: Populate oo_hosts_to_terminate host group
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Evaluate oo_hosts_to_terminate
+ add_host: name={{ item }} groups=oo_hosts_to_terminate
+ with_items: oo_host_group_exp | default([])
+
+- name: Gather dynamic inventory variables for hosts to terminate
+ hosts: oo_hosts_to_terminate
+ gather_facts: no
+
+- name: Terminate instances
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars:
+ host_vars: "{{ hostvars
+ | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
+ tasks:
+ - name: Remove tags from instances
+ ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
+ args:
+ tags:
+ env: "{{ item['ec2_tag_env'] }}"
+ host-type: "{{ item['ec2_tag_host-type'] }}"
+ env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
+ with_items: host_vars
+ when: "'oo_hosts_to_terminate' in groups"
+
+ - name: Terminate instances
+ ec2:
+ state: absent
+ instance_ids: ["{{ item.ec2_id }}"]
+ region: "{{ item.ec2_region }}"
+ ignore_errors: yes
+ register: ec2_term
+ with_items: host_vars
+ when: "'oo_hosts_to_terminate' in groups"
+
+ # Fail if any of the instances failed to terminate with an error other
+ # than 403 Forbidden
+ - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+ when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+ with_items: ec2_term.results
+
+ - name: Stop instance if termination failed
+ ec2:
+ state: stopped
+ instance_ids: ["{{ item.item.ec2_id }}"]
+ region: "{{ item.item.ec2_region }}"
+ register: ec2_stop
+ when: "'oo_hosts_to_terminate' in groups and item.failed"
+ with_items: ec2_term.results
+
+ - name: Rename stopped instances
+ ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+ args:
+ tags:
+ Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+ with_items: ec2_stop.results
+ when: "'oo_hosts_to_terminate' in groups"
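
The fallback chain above (terminate; tolerate a 403, which usually means termination protection; stop and rename whatever would not die) maps onto a few boto calls. A sketch, not the playbook's actual code path:

    import boto.ec2
    from boto.exception import EC2ResponseError

    def terminate_or_stop(conn, instance_id, name_tag):
        try:
            conn.terminate_instances(instance_ids=[instance_id])
        except EC2ResponseError as err:
            if err.status != 403:
                raise  # anything but Forbidden is a real failure
            # Blocked (e.g. termination protection): stop it and rename it
            # with the same "-terminate" suffix the playbook uses.
            conn.stop_instances(instance_ids=[instance_id])
            conn.create_tags([instance_id], {'Name': name_tag + '-terminate'})

    conn = boto.ec2.connect_to_region('us-east-1')  # region illustrative
    terminate_or_stop(conn, 'i-0123456789abcdef0', 'demo-master-00001')
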
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
new file mode 100644
index 000000000..dce49d32f
--- /dev/null
+++ b/playbooks/byo/config.yml
@@ -0,0 +1,6 @@
+---
+- name: Run the openshift-master config playbook
+ include: openshift-master/config.yml
+
+- name: Run the openshift-node config playbook
+ include: openshift-node/config.yml
diff --git a/playbooks/byo/filter_plugins b/playbooks/byo/filter_plugins
new file mode 120000
index 000000000..a4f518f07
--- /dev/null
+++ b/playbooks/byo/filter_plugins
@@ -0,0 +1 @@
+../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
new file mode 100644
index 000000000..f61d277c6
--- /dev/null
+++ b/playbooks/byo/openshift-master/config.yml
@@ -0,0 +1,15 @@
+---
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ with_items: groups['masters']
+
+- include: ../../common/openshift-master/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
new file mode 100644
index 000000000..d569827b4
--- /dev/null
+++ b/playbooks/byo/openshift-node/config.yml
@@ -0,0 +1,21 @@
+---
+- name: Populate oo_nodes_to_config and oo_first_master host groups
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ with_items: groups.nodes
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups.masters[0] }}"
+ groups: oo_first_master
+
+
+- include: ../../common/openshift-node/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-node/filter_plugins b/playbooks/byo/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-node/roles b/playbooks/byo/openshift-node/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
new file mode 100644
index 000000000..cd282270f
--- /dev/null
+++ b/playbooks/byo/openshift_facts.yml
@@ -0,0 +1,10 @@
+---
+- name: Gather OpenShift facts
+ hosts: all
+ gather_facts: no
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ register: result
+ - debug: var=result
diff --git a/playbooks/byo/roles b/playbooks/byo/roles
new file mode 120000
index 000000000..b741aa3db
--- /dev/null
+++ b/playbooks/byo/roles
@@ -0,0 +1 @@
+../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
new file mode 100644
index 000000000..14ffa928f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -0,0 +1,4 @@
+---
+- include: ../openshift-master/config.yml
+
+- include: ../openshift-node/config.yml
diff --git a/playbooks/common/openshift-cluster/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-cluster/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/roles b/playbooks/common/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
new file mode 100644
index 000000000..118727273
--- /dev/null
+++ b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="master"
+
+- name: Generate master instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ register: master_names_output
+ with_sequence: start=1 end={{ num_masters }}
+
+- set_fact:
+ master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
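
The '%05x' | format(1048576 | random) suffix is just a five-hex-digit random number (1048576 = 16^5); the same thing in plain Python, with illustrative values:

    import random

    cluster_id, k8s_type = 'demo', 'master'  # illustrative values
    name = '%s-%s-%05x' % (cluster_id, k8s_type, random.randrange(1048576))
    print(name)  # e.g. demo-master-0f3a2
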
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
new file mode 100644
index 000000000..162315d46
--- /dev/null
+++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="node"
+
+- name: Generate node instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ register: node_names_output
+ with_sequence: start=1 end={{ num_nodes }}
+
+- set_fact:
+ node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
new file mode 100644
index 000000000..e92c6f1ee
--- /dev/null
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -0,0 +1,7 @@
+---
+- hosts: oo_hosts_to_update
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_repos
+ - os_update_latest
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
new file mode 100644
index 000000000..05822d118
--- /dev/null
+++ b/playbooks/common/openshift-master/config.yml
@@ -0,0 +1,19 @@
+---
+- name: Configure master instances
+ hosts: oo_masters_to_config
+ vars:
+ openshift_sdn_master_url: https://{{ openshift.common.hostname }}:4001
+ roles:
+ - openshift_master
+ - { role: openshift_sdn_master, when: openshift.common.use_openshift_sdn | bool }
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
+
+# Additional instance config for online deployments
+- name: Additional instance config
+ hosts: oo_masters_deployment_type_online
+ roles:
+ - pods
+ - os_env_extras
diff --git a/playbooks/common/openshift-master/filter_plugins b/playbooks/common/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
new file mode 100644
index 000000000..5a6c89489
--- /dev/null
+++ b/playbooks/common/openshift-node/config.yml
@@ -0,0 +1,127 @@
+---
+- name: Gather and set facts for node hosts
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Since the master is registering the nodes before they are configured, we
+ # need to make sure to set the node properties beforehand if we do not want
+ # the defaults
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ - role: node
+ local_facts:
+ external_id: "{{ openshift_node_external_id | default(None) }}"
+ resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+ resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+ pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+ labels: "{{ openshift_node_labels | default(None) }}"
+ annotations: "{{ openshift_node_annotations | default(None) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
+ changed_when: False
+
+
+- name: Register nodes
+ hosts: oo_first_master
+ vars:
+ openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ roles:
+ - openshift_register_nodes
+ tasks:
+ # TODO: update so that we only sync necessary configs/directories, currently
+ # we sync for all nodes in oo_nodes_to_config. We will need to inspect the
+ # configs on the nodes to make the determination on whether to sync or not.
+ - name: Create the temp directory on the master
+ file:
+ path: "{{ sync_tmpdir }}"
+ owner: "{{ ansible_ssh_user }}"
+ mode: 0700
+ state: directory
+ changed_when: False
+
+ - name: Create a tarball of the node config directories
+ command: tar -czvf {{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz ./
+ args:
+ chdir: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
+ with_items: openshift_nodes
+ changed_when: False
+
+ - name: Retrieve the node config tarballs from the master
+ fetch:
+ src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: openshift_nodes
+ changed_when: False
+
+
+- name: Configure node instances
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
+ pre_tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift_node_cert_dir }}"
+ state: directory
+
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ - name: Unarchive the tarball on the node
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ openshift.common.hostname }}.tgz"
+ dest: "{{ openshift_node_cert_dir }}"
+ roles:
+ - openshift_node
+ - { role: openshift_sdn_node, when: openshift.common.use_openshift_sdn | bool }
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
+
+- name: Delete the temporary directory on the master
+ hosts: oo_first_master
+ gather_facts: no
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ tasks:
+ - file: name={{ sync_tmpdir }} state=absent
+ changed_when: False
+
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - file: name={{ mktemp.stdout }} state=absent
+ changed_when: False
+
+
+# Additional config for online type deployments
+- name: Additional instance config
+ hosts: oo_nodes_deployment_type_online
+ gather_facts: no
+ roles:
+ - os_env_extras
+ - os_env_extras_node
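
The certificate sync above is mostly shelling out to tar; the master-side packing step is equivalent to this sketch, with paths illustrative:

    import os
    import tarfile

    def pack_node_config(cert_dir, hostname, sync_tmpdir):
        # Store members relative to the node's config dir, matching the
        # playbook's `chdir` plus `./` arguments to tar.
        src = os.path.join(cert_dir, 'node-%s' % hostname)
        out = os.path.join(sync_tmpdir, '%s.tgz' % hostname)
        with tarfile.open(out, 'w:gz') as tar:
            tar.add(src, arcname='.')
        return out
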
diff --git a/playbooks/common/openshift-node/filter_plugins b/playbooks/common/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-node/roles b/playbooks/common/openshift-node/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
new file mode 100644
index 000000000..8b8490246
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -0,0 +1,37 @@
+---
+# TODO: fix firewall related bug with GCE and origin, since GCE is overriding
+# /etc/sysconfig/iptables
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/filter_plugins b/playbooks/gce/openshift-cluster/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
new file mode 100644
index 000000000..771f51e91
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -0,0 +1,28 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - fail: msg="Deployment type not supported for gce provider yet"
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
+
+- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
new file mode 100644
index 000000000..962381306
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -0,0 +1,24 @@
+---
+- name: Generate oo_list_hosts group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env-{{ cluster_id }}
+ when: cluster_id != ''
+ - set_fact: scratch_group=all
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+- name: List Hosts
+ hosts: oo_list_hosts
+ gather_facts: no
+ tasks:
+ - debug:
+ msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/roles b/playbooks/gce/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..9a9848f05
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,41 @@
+---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+- name: Launch instance(s)
+ gce:
+ instance_names: "{{ instances }}"
+ machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}"
+ image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}"
+ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ project_id: "{{ lookup('env', 'gce_project_id') }}"
+ tags:
+ - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
+ - env-{{ cluster }}
+ - host-type-{{ type }}
+ - env-host-type-{{ cluster }}-openshift-{{ type }}
+ register: gce
+
+- name: Add new instances to groups and set variables needed
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_ssh_host: "{{ item.public_ip }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
+ gce_public_ip: "{{ item.public_ip }}"
+ gce_private_ip: "{{ item.private_ip }}"
+ with_items: gce.instance_data
+
+- name: Wait for ssh
+ wait_for: port=22 host={{ item.public_ip }}
+ with_items: gce.instance_data
+
+- name: Wait for user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 10
+ with_items: gce.instance_data
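
The wait-for-ssh pattern used above (poll port 22; 20 tries, 10 seconds apart) is the usual retry loop; a generic Python equivalent:

    import socket
    import time

    def wait_for_ssh(host, port=22, retries=20, delay=10):
        # Succeeds as soon as a TCP connect to host:port works.
        for _ in range(retries):
            try:
                socket.create_connection((host, port), timeout=10).close()
                return True
            except socket.error:
                time.sleep(delay)
        return False
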
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
new file mode 100644
index 000000000..abe6a4c95
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -0,0 +1,34 @@
+---
+- name: Terminate instance(s)
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+- include: ../openshift-node/terminate.yml
+ vars:
+ gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+
+- include: ../openshift-master/terminate.yml
+ vars:
+ gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
new file mode 100644
index 000000000..9ebf39a13
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
new file mode 100644
index 000000000..ae33083b9
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -0,0 +1,15 @@
+---
+deployment_vars:
+ origin:
+ image: centos-7
+ ssh_user:
+ sudo: yes
+ online:
+ image: libra-rhel7
+ ssh_user: root
+ sudo: no
+ enterprise:
+ image: rhel-7
+ ssh_user:
+ sudo: yes
+
diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml
index ae598b622..af6000bc8 100644
--- a/playbooks/gce/openshift-master/config.yml
+++ b/playbooks/gce/openshift-master/config.yml
@@ -1,42 +1,18 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_masters_to_config host group
hosts: localhost
gather_facts: no
tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_hosts_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
-- name: "Gather facts for nodes in {{ oo_env }}"
- hosts: "tag_env-host-type-{{ oo_env }}-openshift-node"
- connection: ssh
- user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Setting openshift_node_ips fact on localhost
- set_fact:
- openshift_node_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-node'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-node'] is defined
-
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
- vars_files:
- - vars.yml
- roles:
- - repos
- - {
- role: openshift_master,
- openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
- openshift_public_ip: "{{ gce_public_ip }}",
- openshift_env: "{{ oo_env }}",
- }
- - pods
- - os_env_extras
+- include: ../../common/openshift-master/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml
index f2800b061..ef10b6cf0 100644
--- a/playbooks/gce/openshift-master/launch.yml
+++ b/playbooks/gce/openshift-master/launch.yml
@@ -1,17 +1,19 @@
---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
- name: Launch instance(s)
hosts: localhost
connection: local
gather_facts: no
+# TODO: modify image based on deployment_type
vars:
inst_names: "{{ oo_new_inst_names }}"
machine_type: n1-standard-1
image: libra-rhel7
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
gce:
@@ -24,16 +26,18 @@
tags: "{{ oo_new_inst_tags }}"
register: gce
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_masters_to_config
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_ssh_host: "{{ item.public_ip }}"
+ groupname: oo_masters_to_config
+ gce_private_ip: "{{ item.private_ip }}"
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
- - debug: var=gce
-
- name: Wait for root user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
register: result
diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml
index 76e1404b5..452ac5199 100644
--- a/playbooks/gce/openshift-master/terminate.yml
+++ b/playbooks/gce/openshift-master/terminate.yml
@@ -1,20 +1,16 @@
-- name: "populate oo_hosts_to_terminate host group if needed"
+---
+- name: Populate oo_masters_to_terminate host group if needed
hosts: localhost
gather_facts: no
tasks:
- - debug: var=oo_host_group_exp
+ - name: Evaluate oo_masters_to_terminate
+ add_host: name={{ item }} groups=oo_masters_to_terminate
+ with_items: oo_host_group_exp | default([])
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_hosts_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
- - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-
-
-- name: Terminate instances
+- name: Terminate master instances
hosts: localhost
connection: local
+ gather_facts: no
tasks:
- name: Terminate master instances
gce:
@@ -22,11 +18,10 @@
pem_file: "{{ gce_pem_file }}"
project_id: "{{ gce_project_id }}"
state: 'absent'
- instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
- disks: "{{ groups['oo_hosts_to_terminate'] }}"
+ instance_names: "{{ groups['oo_masters_to_terminate'] }}"
+ disks: "{{ groups['oo_masters_to_terminate'] }}"
register: gce
-
- - debug: var=gce
+ when: "'oo_masters_to_terminate' in groups"
- name: Remove disks of instances
gce_pd:
@@ -37,5 +32,4 @@
zone: "{{ gce.zone }}"
state: absent
with_items: gce.instance_names
-
-
+ when: "'oo_masters_to_terminate' in groups"
diff --git a/playbooks/gce/openshift-master/vars.yml b/playbooks/gce/openshift-master/vars.yml
deleted file mode 100644
index fb5f4ea42..000000000
--- a/playbooks/gce/openshift-master/vars.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_debug_level: 4
diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml
index 85f34e814..5b1601176 100644
--- a/playbooks/gce/openshift-node/config.yml
+++ b/playbooks/gce/openshift-node/config.yml
@@ -1,49 +1,24 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_nodes_to_config and oo_first_master host groups
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_hosts_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: root
-- name: "Gather facts for masters in {{ oo_env }}"
- hosts: "tag_env-host-type-{{ oo_env }}-openshift-master"
- connection: ssh
- user: root
-- name: "Set OO sepcific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Setting openshift_master_ips fact on localhost
- set_fact:
- openshift_master_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
- - name: Setting openshift_master_public_ips fact on localhost
- set_fact:
- openshift_master_public_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
- | oo_collect(attribute='gce_public_ip') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
- vars_files:
- - vars.yml
- roles:
- - repos
- - docker
- - {
- role: openshift_node,
- openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
- openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
- openshift_public_ip: "{{ gce_public_ip }}",
- openshift_env: "{{ oo_env }}",
- }
- - os_env_extras
+- include: ../../common/openshift-node/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml
index 935599efd..086ba58bc 100644
--- a/playbooks/gce/openshift-node/launch.yml
+++ b/playbooks/gce/openshift-node/launch.yml
@@ -1,17 +1,19 @@
---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
- name: Launch instance(s)
hosts: localhost
connection: local
gather_facts: no
+# TODO: modify image based on deployment_type
vars:
inst_names: "{{ oo_new_inst_names }}"
machine_type: n1-standard-1
image: libra-rhel7
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
gce:
@@ -24,16 +26,18 @@
tags: "{{ oo_new_inst_tags }}"
register: gce
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_nodes_to_config
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_ssh_host: "{{ item.public_ip }}"
+ groupname: oo_nodes_to_config
+ gce_private_ip: "{{ item.private_ip }}"
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
- - debug: var=gce
-
- name: Wait for root user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
register: result
@@ -45,13 +49,3 @@
# Apply the configs, separate so that just the configs can be run by themselves
- include: config.yml
-
-# Always bounce service to pick up new credentials
-#- name: "Restart instances"
-# hosts: oo_hosts_to_config
-# connection: ssh
-# user: root
-# tasks:
-# - debug: var=groups.oo_hosts_to_config
-# - name: Restart OpenShift
-# service: name=openshift-node enabled=yes state=restarted
diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml
index 8d60f27b3..357e0c295 100644
--- a/playbooks/gce/openshift-node/terminate.yml
+++ b/playbooks/gce/openshift-node/terminate.yml
@@ -1,20 +1,16 @@
-- name: "populate oo_hosts_to_terminate host group if needed"
+---
+- name: Populate oo_nodes_to_terminate host group if needed
hosts: localhost
gather_facts: no
tasks:
- - debug: var=oo_host_group_exp
+ - name: Evaluate oo_nodes_to_terminate
+ add_host: name={{ item }} groups=oo_nodes_to_terminate
+ with_items: oo_host_group_exp | default([])
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_hosts_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
- - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-
-
-- name: Terminate instances
+- name: Terminate node instances
hosts: localhost
connection: local
+ gather_facts: no
tasks:
- name: Terminate node instances
gce:
@@ -22,11 +18,10 @@
pem_file: "{{ gce_pem_file }}"
project_id: "{{ gce_project_id }}"
state: 'absent'
- instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
- disks: "{{ groups['oo_hosts_to_terminate'] }}"
+ instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
+ disks: "{{ groups['oo_nodes_to_terminate'] }}"
register: gce
-
- - debug: var=gce
+ when: "'oo_nodes_to_terminate' in groups"
- name: Remove disks of instances
gce_pd:
@@ -37,5 +32,4 @@
zone: "{{ gce.zone }}"
state: absent
with_items: gce.instance_names
-
-
+ when: "'oo_nodes_to_terminate' in groups"
diff --git a/playbooks/gce/openshift-node/vars.yml b/playbooks/gce/openshift-node/vars.yml
deleted file mode 100644
index fb5f4ea42..000000000
--- a/playbooks/gce/openshift-node/vars.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_debug_level: 4
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
new file mode 100644
index 000000000..faf278b10
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -0,0 +1,38 @@
+---
+# TODO: need to figure out a plan for setting hostname, currently the default
+# is localhost, so no hostname value (or public_hostname) value is getting
+# assigned
+
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_masters_to_config
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_nodes_to_config
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_first_master
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/libvirt/openshift-cluster/filter_plugins b/playbooks/libvirt/openshift-cluster/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
new file mode 100644
index 000000000..a7ddc1e7e
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -0,0 +1,36 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ vars:
+ os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}"
+ os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}"
+ os_libvirt_network: "{{ libvirt_network | default('default') }}"
+ image_url: "{{ deployment_vars[deployment_type].image.url }}"
+ image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
+ image_name: "{{ deployment_vars[deployment_type].image.name }}"
+ tasks:
+ - fail: msg="Deployment type not supported for libvirt provider yet"
+ when: deployment_type in ['online', 'enterprise']
+
+ - include: tasks/configure_libvirt.yml
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
+
+- include: list.yml
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
new file mode 100644
index 000000000..eaedc4d0d
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -0,0 +1,23 @@
+---
+- name: Generate oo_list_hosts group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env-{{ cluster_id }}
+ when: cluster_id != ''
+ - set_fact: scratch_group=all
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+
+- name: List Hosts
+ hosts: oo_list_hosts
+ tasks:
+ - debug:
+ msg: 'public:{{ ansible_default_ipv4.address }} private:{{ ansible_default_ipv4.address }}'
diff --git a/playbooks/libvirt/openshift-cluster/roles b/playbooks/libvirt/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
new file mode 100644
index 000000000..f237c1a60
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
@@ -0,0 +1,6 @@
+---
+- include: configure_libvirt_storage_pool.yml
+ when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
+
+- include: configure_libvirt_network.yml
+ when: libvirt_network is defined
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
new file mode 100644
index 000000000..3117d9edc
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -0,0 +1,27 @@
+---
+- name: Test if libvirt network for openshift already exists
+ command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
+ register: net_info_result
+ changed_when: False
+ failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr"
+
+- name: Create a temp directory for the template xml file
+ command: "mktemp -d /tmp/openshift-ansible-XXXXXXX"
+ register: mktemp
+ when: net_info_result.rc == 1
+
+- name: Create network xml file
+ template:
+ src: templates/network.xml
+ dest: "{{ mktemp.stdout }}/network.xml"
+ when: net_info_result.rc == 1
+
+- name: Create libvirt network for openshift
+ command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
+ when: net_info_result.rc == 1
+
+- name: Remove the temp directory
+ file:
+ path: "{{ mktemp.stdout }}"
+ state: absent
+ when: net_info_result.rc == 1
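The create/cleanup tasks above key on net_info_result.rc == 1, which assumes virsh exits with code 1 specifically when the network does not exist (any other failure is already surfaced by the failed_when expression). A minimal sketch that makes this assumption explicit as a single fact (the network_missing variable name is hypothetical):

    - name: Record whether the network needs to be created
      set_fact:
        # Assumes virsh exits 1 (and only 1) for a missing network
        network_missing: "{{ net_info_result.rc == 1 }}"

    - name: Create libvirt network for openshift
      command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
      when: network_missing | bool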
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
new file mode 100644
index 000000000..8a67d713f
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -0,0 +1,23 @@
+---
+- name: Create libvirt storage directory for openshift
+ file:
+ dest: "{{ libvirt_storage_pool_path }}"
+ state: directory
+
+- acl:
+ default: yes
+ entity: kvm
+ etype: group
+ name: "{{ libvirt_storage_pool_path }}"
+ permissions: rwx
+ state: present
+
+- name: Test if libvirt storage pool for openshift already exists
+ command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
+ register: pool_info_result
+ changed_when: False
+ failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr"
+
+- name: Create the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+ when: pool_info_result.rc == 1
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..359d0b2f3
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,107 @@
+---
+# TODO: Add support for choosing base image based on deployment_type and os
+# wanted (os wanted needs support added in bin/cluster with sane defaults:
+# fedora/centos for origin, rhel for online/enterprise)
+
+# TODO: create a role to encapsulate some of this complexity, possibly also
+# create a module to manage the storage tasks, network tasks, and possibly
+# even handle the libvirt tasks to set metadata in the domain xml and be able
+# to create/query data about vms without having to use xml the python libvirt
+# bindings look like a good candidate for this
+
+- name: Download Base Cloud image
+ get_url:
+ url: '{{ image_url }}'
+ sha256sum: '{{ image_sha256 }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+
+- name: Create the cloud-init config drive path
+ file:
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ state: directory
+ with_items: instances
+
+- name: Create the cloud-init config drive files
+ template:
+ src: '{{ item[1] }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
+ with_nested:
+ - instances
+ - [ user-data, meta-data ]
+
+- name: Create the cloud-init config drive
+ command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+ args:
+ chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ with_items: instances
+
+- name: Refresh the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
+
+- name: Create VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
+ with_items: instances
+
+- name: Create VMs
+ virt:
+ name: '{{ item }}'
+ command: define
+ xml: "{{ lookup('template', '../templates/domain.xml') }}"
+ uri: '{{ libvirt_uri }}'
+ with_items: instances
+
+- name: Start VMs
+ virt:
+ name: '{{ item }}'
+ state: running
+ uri: '{{ libvirt_uri }}'
+ with_items: instances
+
+- name: Collect MAC addresses of the VMs
+ shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+ register: scratch_mac
+ with_items: instances
+
+- name: Wait for the VMs to get an IP
+ command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
+ ignore_errors: yes
+ register: nb_allocated_ips
+ until: nb_allocated_ips.stdout == '{{ instances | length }}'
+ retries: 30
+ delay: 1
+
+- name: Collect IP addresses of the VMs
+ shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+ register: scratch_ip
+ with_items: scratch_mac.results
+
+- set_fact:
+ ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+
+- name: Add new instances
+ add_host:
+ hostname: '{{ item.0 }}'
+ ansible_ssh_host: '{{ item.1 }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+ with_together:
+ - instances
+ - ips
+
+- name: Wait for ssh
+ wait_for:
+ host: '{{ item }}'
+ port: 22
+ with_items: ips
+
+- name: Wait for openshift user setup
+ command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
+ register: result
+ until: result.rc == 0
+ retries: 30
+ delay: 1
+ with_together:
+ - instances
+ - ips
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
new file mode 100644
index 000000000..df200e374
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -0,0 +1,68 @@
+<domain type='kvm' id='8'>
+ <name>{{ item }}</name>
+ <memory unit='GiB'>1</memory>
+ <metadata xmlns:ansible="https://github.com/ansible/ansible">
+ <ansible:tags>
+ <ansible:tag>env-{{ cluster }}</ansible:tag>
+ <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
+ <ansible:tag>host-type-{{ type }}</ansible:tag>
+ </ansible:tags>
+ </metadata>
+ <currentMemory unit='GiB'>1</currentMemory>
+ <vcpu placement='static'>2</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc'>hvm</type>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <clock offset='utc'>
+ <timer name='rtc' tickpolicy='catchup'/>
+ <timer name='pit' tickpolicy='delay'/>
+ <timer name='hpet' present='no'/>
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+ <target dev='vdb' bus='virtio'/>
+ <readonly/>
+ </disk>
+ <controller type='usb' index='0' />
+ <interface type='network'>
+ <source network='{{ os_libvirt_network }}'/>
+ <model type='virtio'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <channel type='spicevmc'>
+ <target type='virtio' name='com.redhat.spice.0'/>
+ </channel>
+ <input type='tablet' bus='usb' />
+ <input type='mouse' bus='ps2'/>
+ <graphics type='spice' autoport='yes' />
+ <video>
+ <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>
+ </video>
+ <redirdev bus='usb' type='spicevmc'>
+ </redirdev>
+ <memballoon model='virtio'>
+ </memballoon>
+ </devices>
+</domain>
diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data
new file mode 100644
index 000000000..6b421770d
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/meta-data
@@ -0,0 +1,3 @@
+instance-id: {{ item[0] }}
+hostname: {{ item[0] }}
+local-hostname: {{ item[0] }}.example.com
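Rendered for a hypothetical instance name of node1, the template above yields the config-drive meta-data:

    instance-id: node1
    hostname: node1
    local-hostname: node1.example.com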
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
new file mode 100644
index 000000000..86dcd62bb
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -0,0 +1,23 @@
+<network>
+ <name>openshift-ansible</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <!-- TODO: query for first available virbr interface available -->
+ <bridge name='virbr3' stp='on' delay='0'/>
+ <!-- TODO: make overridable -->
+ <domain name='example.com'/>
+ <dns>
+ <!-- TODO: automatically add host entries -->
+ </dns>
+ <!-- TODO: query for available address space -->
+ <ip address='192.168.55.1' netmask='255.255.255.0'>
+ <dhcp>
+ <range start='192.168.55.2' end='192.168.55.254'/>
+ <!-- TODO: add static entries addresses for the hosts to be created -->
+ </dhcp>
+ </ip>
+</network>
+
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
new file mode 100644
index 000000000..77b788109
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -0,0 +1,23 @@
+#cloud-config
+disable_root: true
+
+hostname: {{ item[0] }}
+fqdn: {{ item[0] }}.example.com
+manage_etc_hosts: true
+
+users:
+ - default
+ - name: root
+ ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+bootcmd:
+ - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
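The bootcmd entry pins DHCP_HOSTNAME into the eth0 ifcfg file so the guest keeps its generated name when dhclient is restarted. For a hypothetical instance name of node1, the hostname-related lines render as:

    hostname: node1
    fqdn: node1.example.com
    manage_etc_hosts: true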
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
new file mode 100644
index 000000000..b173a09dd
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -0,0 +1,44 @@
+---
+# TODO: does not handle a non-existent cluster gracefully
+
+- name: Terminate instance(s)
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: cluster_group=tag_env-{{ cluster_id }}
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[cluster_group] | default([])
+
+ - name: Destroy VMs
+ virt:
+ name: '{{ item[0] }}'
+ command: '{{ item[1] }}'
+ uri: '{{ libvirt_uri }}'
+ with_nested:
+ - groups['oo_hosts_to_terminate']
+ - [ destroy, undefine ]
+
+ - name: Delete VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
+ args:
+ removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
+ with_items: groups['oo_hosts_to_terminate']
+
+ - name: Delete the VM cloud-init image
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ state: absent
+ with_items: groups['oo_hosts_to_terminate']
+
+ - name: Remove the cloud-init config directory
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ state: absent
+ with_items: groups['oo_hosts_to_terminate']
+
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
new file mode 100644
index 000000000..57e36db9e
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
new file mode 100644
index 000000000..65d954fee
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -0,0 +1,33 @@
+---
+libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
+libvirt_storage_pool: 'openshift-ansible'
+libvirt_network: openshift-ansible
+libvirt_uri: 'qemu:///system'
+
+deployment_vars:
+ origin:
+ image:
+ url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
+ name: CentOS-7-x86_64-GenericCloud.qcow2
+ sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+ ssh_user: openshift
+ sudo: yes
+ online:
+ image:
+ url:
+ name:
+ sha256:
+ ssh_user: root
+ sudo: no
+ enterprise:
+ image:
+ url:
+ name:
+ sha256:
+ ssh_user: openshift
+ sudo: yes
+# origin:
+# fedora:
+# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
+# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
diff --git a/rel-eng/packages/.readme b/rel-eng/packages/.readme
new file mode 100644
index 000000000..8999c8dbc
--- /dev/null
+++ b/rel-eng/packages/.readme
@@ -0,0 +1,3 @@
+The rel-eng/packages directory contains metadata files
+named after their packages. Each file records the latest tagged
+version and the project's relative directory.
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
new file mode 100644
index 000000000..500e1f4b1
--- /dev/null
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -0,0 +1 @@
+0.0.8-1 bin/
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
new file mode 100644
index 000000000..cf3ac87ed
--- /dev/null
+++ b/rel-eng/packages/openshift-ansible-inventory
@@ -0,0 +1 @@
+0.0.2-1 inventory/
diff --git a/rel-eng/tito.props b/rel-eng/tito.props
new file mode 100644
index 000000000..eab3f190d
--- /dev/null
+++ b/rel-eng/tito.props
@@ -0,0 +1,5 @@
+[buildconfig]
+builder = tito.builder.Builder
+tagger = tito.tagger.VersionTagger
+changelog_do_not_remove_cherrypick = 0
+changelog_format = %s (%ae)
diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml
index f58a5b1c2..1d75a95e6 100644
--- a/roles/ansible_tower/tasks/main.yaml
+++ b/roles/ansible_tower/tasks/main.yaml
@@ -9,6 +9,7 @@
- ansible
- telnet
- ack
+ - python-ansible-tower-cli
- name: download Tower setup
get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no
@@ -25,3 +26,9 @@
- name: Open firewalld port for https
firewalld: port=8080/tcp permanent=true state=enabled
+- name: Set (httpd_can_network_connect) flag on and keep it persistent across reboots
+ seboolean: name=httpd_can_network_connect state=yes persistent=yes
+
+- name: Set (httpd_can_network_connect_db) flag on and keep it persistent across reboots
+ seboolean: name=httpd_can_network_connect_db state=yes persistent=yes
+
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 2ecefd588..ca700db17 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -11,5 +11,5 @@
# From the origin rpm there exists instructions on how to
# setup origin properly. The following steps come from there
- name: Change root to be in the Docker group
- user: name=root groups=docker append=yes
+ user: name=root groups=dockerroot append=yes
diff --git a/roles/openshift_ansible_inventory/README.md b/roles/openshift_ansible_inventory/README.md
new file mode 100644
index 000000000..69a07effd
--- /dev/null
+++ b/roles/openshift_ansible_inventory/README.md
@@ -0,0 +1,41 @@
+OpenShift Ansible Inventory
+===========================
+
+Install and configure openshift-ansible-inventory.
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+oo_inventory_group (default: root)
+oo_inventory_owner (default: root)
+oo_inventory_accounts (list of account definitions for the multi_ec2 inventory)
+oo_inventory_cache_max_age (default: 1800 seconds)
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+A minimal playbook applying this role (account values are illustrative placeholders):
+
+    - hosts: inventory_servers
+      roles:
+      - role: openshift_ansible_inventory
+        oo_inventory_accounts:
+        - name: aws-dev
+          provider: aws/hosts/ec2.py
+          env_vars:
+            AWS_ACCESS_KEY_ID: <access-key-id>
+            AWS_SECRET_ACCESS_KEY: <secret-access-key>
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+OpenShift Operations, Red Hat, Inc.
diff --git a/roles/openshift_ansible_inventory/defaults/main.yml b/roles/openshift_ansible_inventory/defaults/main.yml
new file mode 100644
index 000000000..f53c00c80
--- /dev/null
+++ b/roles/openshift_ansible_inventory/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+oo_inventory_group: root
+oo_inventory_owner: root
+oo_inventory_cache_max_age: 1800
diff --git a/roles/openshift_ansible_inventory/handlers/main.yml b/roles/openshift_ansible_inventory/handlers/main.yml
new file mode 100644
index 000000000..e2db43477
--- /dev/null
+++ b/roles/openshift_ansible_inventory/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for openshift_ansible_inventory
diff --git a/roles/openshift_ansible_inventory/meta/main.yml b/roles/openshift_ansible_inventory/meta/main.yml
new file mode 100644
index 000000000..ff3df0a7d
--- /dev/null
+++ b/roles/openshift_ansible_inventory/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: Install and configure openshift-ansible-inventory
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
new file mode 100644
index 000000000..3990d5750
--- /dev/null
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- yum:
+ name: openshift-ansible-inventory
+ state: present
+
+- template:
+ src: multi_ec2.yaml.j2
+ dest: /etc/ansible/multi_ec2.yaml
+ group: "{{ oo_inventory_group }}"
+ owner: "{{ oo_inventory_owner }}"
+ mode: "0640"
diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
new file mode 100644
index 000000000..23dfe73b8
--- /dev/null
+++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
@@ -0,0 +1,11 @@
+# multi ec2 inventory configs
+cache_max_age: {{ oo_inventory_cache_max_age }}
+accounts:
+{% for account in oo_inventory_accounts %}
+ - name: {{ account.name }}
+ provider: {{ account.provider }}
+ env_vars:
+ AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
+
+{% endfor %}
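Rendered with one hypothetical account (matching the example playbook in this role's README; provider path and credentials are placeholders), the template above produces:

    # multi ec2 inventory configs
    cache_max_age: 1800
    accounts:
      - name: aws-dev
        provider: aws/hosts/ec2.py
        env_vars:
          AWS_ACCESS_KEY_ID: <access-key-id>
          AWS_SECRET_ACCESS_KEY: <secret-access-key>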
diff --git a/roles/openshift_ansible_inventory/vars/main.yml b/roles/openshift_ansible_inventory/vars/main.yml
new file mode 100644
index 000000000..25c049282
--- /dev/null
+++ b/roles/openshift_ansible_inventory/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for openshift_ansible_inventory
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
index c2ae609ff..14c2037e4 100644
--- a/roles/openshift_common/README.md
+++ b/roles/openshift_common/README.md
@@ -12,19 +12,21 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-beta-rpms repos.
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------|------------------------------|----------------------------------------|
-| openshift_bind_ip | ansible_default_ipv4.address | IP to use for local binding |
-| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_env | default | Environment name if multiple OpenShift instances |
+| Name | Default value | |
+|---------------------------|-------------------|---------------------------------------------|
+| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters |
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) |
+| openshift_ip | UNDEF | Internal IP address to use for this host |
+| openshift_public_hostname | UNDEF | Public hostname to use for this host |
+| openshift_public_ip | UNDEF | Public IP address to use for this host |
Dependencies
------------
os_firewall
+openshift_facts
+openshift_repos
Example Playbook
----------------
@@ -39,4 +41,4 @@ Apache License, Version 2.0
Author Information
------------------
-TODO
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml
index a541591fb..4d3e0fe9e 100644
--- a/roles/openshift_common/defaults/main.yml
+++ b/roles/openshift_common/defaults/main.yml
@@ -1,8 +1,3 @@
---
-openshift_bind_ip: "{{ ansible_default_ipv4.address }}"
+openshift_cluster_id: 'default'
openshift_debug_level: 0
-
-# TODO: Once openshift stops resolving hostnames for node queries remove
-# this...
-openshift_hostname_workaround: true
-openshift_hostname: "{{ openshift_public_ip if openshift_hostname_workaround else ansible_fqdn }}"
diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml
index 88b7677d0..81363ec68 100644
--- a/roles/openshift_common/meta/main.yml
+++ b/roles/openshift_common/meta/main.yml
@@ -13,3 +13,5 @@ galaxy_info:
- cloud
dependencies:
- { role: os_firewall }
+- { role: openshift_facts }
+- { role: openshift_repos }
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 728bba4e4..c55677c3f 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,21 +1,16 @@
---
-# fixme: Once openshift stops resolving hostnames for node queries remove this...
-- name: Set hostname to IP Addr (WORKAROUND)
- hostname: name={{ openshift_bind_ip }}
- when: openshift_hostname_workaround
-
-- name: Configure local facts file
- file: path=/etc/ansible/facts.d/ state=directory mode=0750
-
- name: Set common OpenShift facts
- include: set_facts.yml
- facts:
- - section: common
- option: env
- value: "{{ openshift_env | default('default') }}"
- - section: common
- option: host_type
- value: "{{ openshift_host_type }}"
- - section: common
- option: debug_level
- value: "{{ openshift_debug_level }}"
+ openshift_facts:
+ role: common
+ local_facts:
+ cluster_id: "{{ openshift_cluster_id | default('default') }}"
+ debug_level: "{{ openshift_debug_level | default(0) }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+- name: Set hostname
+ hostname: name={{ openshift.common.hostname }}
+
diff --git a/roles/openshift_common/tasks/set_facts.yml b/roles/openshift_common/tasks/set_facts.yml
deleted file mode 100644
index 349eecd1d..000000000
--- a/roles/openshift_common/tasks/set_facts.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: "Setting local_facts"
- ini_file:
- dest: /etc/ansible/facts.d/openshift.fact
- mode: 0640
- section: "{{ item.section }}"
- option: "{{ item.option }}"
- value: "{{ item.value }}"
- with_items: facts
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 623aed9bf..9f657a2c7 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -1,6 +1,11 @@
---
-openshift_master_credentials_dir: /var/lib/openshift/openshift.local.certificates/admin/
-
# TODO: Upstream kubernetes only supports iptables currently, if this changes,
# then these variable should be moved to defaults
+# TODO: it might be possible to still use firewalld if we wire up the created
+# chains with the public zone (or the zone associated with the correct
+# interfaces)
os_firewall_use_firewalld: False
+
+openshift_cert_parent_dir: /var/lib/openshift
+openshift_cert_relative_dir: openshift.local.certificates
+openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
diff --git a/roles/openshift_facts/README.md b/roles/openshift_facts/README.md
new file mode 100644
index 000000000..2fd50e236
--- /dev/null
+++ b/roles/openshift_facts/README.md
@@ -0,0 +1,34 @@
+OpenShift Facts
+===============
+
+Provides the openshift_facts module
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
new file mode 100755
index 000000000..1e0d5c605
--- /dev/null
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -0,0 +1,478 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+DOCUMENTATION = '''
+---
+module: openshift_facts
+short_description: OpenShift Facts
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
+
+import ConfigParser
+import copy
+import json
+import os
+import re
+import urlparse
+
+class OpenShiftFactsUnsupportedRoleError(Exception):
+ pass
+
+class OpenShiftFactsFileWriteError(Exception):
+ pass
+
+class OpenShiftFactsMetadataUnavailableError(Exception):
+ pass
+
+class OpenShiftFacts():
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
+
+ def __init__(self, role, filename, local_facts):
+ self.changed = False
+ self.filename = filename
+ if role not in self.known_roles:
+ raise OpenShiftFactsUnsupportedRoleError("Role %s is not supported by this module" % role)
+ self.role = role
+ self.facts = self.generate_facts(local_facts)
+
+ def generate_facts(self, local_facts):
+ local_facts = self.init_local_facts(local_facts)
+ roles = local_facts.keys()
+
+ defaults = self.get_defaults(roles)
+ provider_facts = self.init_provider_facts()
+ facts = self.apply_provider_facts(defaults, provider_facts, roles)
+
+ facts = self.merge_facts(facts, local_facts)
+ facts['current_config'] = self.current_config(facts)
+ self.set_url_facts_if_unset(facts)
+ return dict(openshift=facts)
+
+
+ def set_url_facts_if_unset(self, facts):
+ if 'master' in facts:
+ for (url_var, use_ssl, port, default) in [
+ ('api_url',
+ facts['master']['api_use_ssl'],
+ facts['master']['api_port'],
+ facts['common']['hostname']),
+ ('public_api_url',
+ facts['master']['api_use_ssl'],
+ facts['master']['api_port'],
+ facts['common']['public_hostname']),
+ ('console_url',
+ facts['master']['console_use_ssl'],
+ facts['master']['console_port'],
+ facts['common']['hostname']),
+ ('public_console_url',
+ facts['master']['console_use_ssl'],
+ facts['master']['console_port'],
+ facts['common']['public_hostname'])]:
+ if url_var not in facts['master']:
+ scheme = 'https' if use_ssl else 'http'
+ netloc = default
+ if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'):
+ netloc = "%s:%s" % (netloc, port)
+ facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', ''))
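+ # Worked example (illustrative values): with api_use_ssl=True,
+ # api_port='8443' and hostname='master.example.com', the loop above
+ # yields facts['master']['api_url'] == 'https://master.example.com:8443'.
+ # The port is appended to the netloc only when it differs from the
+ # scheme default (443 for https, 80 for http).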
+
+
+ # Query current OpenShift config and return a dictionary containing
+ # settings that may be valuable for determining actions that need to be
+ # taken in the playbooks/roles
+ def current_config(self, facts):
+ current_config = dict()
+ roles = [role for role in facts if role not in ['common', 'provider']]
+ for role in roles:
+ if 'roles' in current_config:
+ current_config['roles'].append(role)
+ else:
+ current_config['roles'] = [role]
+
+ # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
+ # determine the location of files.
+
+ # Query kubeconfig settings
+ kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
+ if role == 'node':
+ kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname'])
+
+ kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
+ if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
+ try:
+ _, output, error = module.run_command(["/usr/bin/openshift", "ex",
+ "config", "view", "-o",
+ "json",
+ "--kubeconfig=%s" % kubeconfig_path],
+ check_rc=False)
+ config = json.loads(output)
+
+ try:
+ for cluster in config['clusters']:
+ config['clusters'][cluster]['certificate-authority-data'] = 'masked'
+ except KeyError:
+ pass
+ try:
+ for user in config['users']:
+ config['users'][user]['client-certificate-data'] = 'masked'
+ config['users'][user]['client-key-data'] = 'masked'
+ except KeyError:
+ pass
+
+ current_config['kubeconfig'] = config
+ except Exception:
+ pass
+
+ return current_config
+
+
+ def apply_provider_facts(self, facts, provider_facts, roles):
+ if not provider_facts:
+ return facts
+
+ use_openshift_sdn = provider_facts.get('use_openshift_sdn')
+ if isinstance(use_openshift_sdn, bool):
+ facts['common']['use_openshift_sdn'] = use_openshift_sdn
+
+ common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
+ for h_var, ip_var in common_vars:
+ ip_value = provider_facts['network'].get(ip_var)
+ if ip_value:
+ facts['common'][ip_var] = ip_value
+
+ facts['common'][h_var] = self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var])
+
+ if 'node' in roles:
+ ext_id = provider_facts.get('external_id')
+ if ext_id:
+ facts['node']['external_id'] = ext_id
+
+ facts['provider'] = provider_facts
+ return facts
+
+ def hostname_valid(self, hostname):
+ if (not hostname or
+ hostname.startswith('localhost') or
+ hostname.endswith('localdomain') or
+ len(hostname.split('.')) < 2):
+ return False
+
+ return True
+
+ def choose_hostname(self, hostnames=[], fallback=''):
+ hostname = fallback
+
+ ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ]
+ hosts = [ i for i in hostnames if i is not None and i not in set(ips) ]
+
+ for host_list in (hosts, ips):
+ for h in host_list:
+ if self.hostname_valid(h):
+ return h
+
+ return hostname
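+ # Example: choose_hostname(['10.0.0.1', 'master.example.com'], '10.0.0.1')
+ # returns 'master.example.com' -- resolvable-looking names are preferred
+ # over bare IPs, and the fallback is used only when nothing passes
+ # hostname_valid().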
+
+ def get_defaults(self, roles):
+ ansible_facts = self.get_ansible_facts()
+
+ defaults = dict()
+
+ common = dict(use_openshift_sdn=True)
+ ip = ansible_facts['default_ipv4']['address']
+ common['ip'] = ip
+ common['public_ip'] = ip
+
+ rc, output, error = module.run_command(['hostname', '-f'])
+ hostname_f = output.strip() if rc == 0 else ''
+ hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']]
+ hostname = self.choose_hostname(hostname_values)
+
+ common['hostname'] = hostname
+ common['public_hostname'] = hostname
+ defaults['common'] = common
+
+ if 'master' in roles:
+ # TODO: provide for a better way to override just the port, or just
+ # the urls, instead of forcing both, also to override the hostname
+ # without having to re-generate these urls later
+ master = dict(api_use_ssl=True, api_port='8443',
+ console_use_ssl=True, console_path='/console',
+ console_port='8443', etcd_use_ssl=False,
+ etcd_port='4001', portal_net='172.30.17.0/24')
+ defaults['master'] = master
+
+ if 'node' in roles:
+ node = dict(external_id=common['hostname'], pod_cidr='',
+ labels={}, annotations={})
+ node['resources_cpu'] = ansible_facts['processor_cores']
+ node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+ defaults['node'] = node
+
+ return defaults
+
+ def merge_facts(self, orig, new):
+ facts = dict()
+ for key, value in orig.iteritems():
+ if key in new:
+ if isinstance(value, dict):
+ facts[key] = self.merge_facts(value, new[key])
+ else:
+ facts[key] = copy.copy(new[key])
+ else:
+ facts[key] = copy.deepcopy(value)
+ new_keys = set(new.keys()) - set(orig.keys())
+ for key in new_keys:
+ facts[key] = copy.deepcopy(new[key])
+ return facts
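+ # Example: merge_facts({'common': {'ip': 'a', 'x': 1}}, {'common': {'ip': 'b'}})
+ # returns {'common': {'ip': 'b', 'x': 1}} -- values from new win per key,
+ # and nested dicts are merged recursively rather than replaced wholesale.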
+
+ def query_metadata(self, metadata_url, headers=None, expect_json=False):
+ r, info = fetch_url(module, metadata_url, headers=headers)
+ if info['status'] != 200:
+ raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
+ if expect_json:
+ return module.from_json(r.read())
+ else:
+ return [line.strip() for line in r.readlines()]
+
+ def walk_metadata(self, metadata_url, headers=None, expect_json=False):
+ metadata = dict()
+
+ for line in self.query_metadata(metadata_url, headers, expect_json):
+ if line.endswith('/') and not line == 'public-keys/':
+ key = line[:-1]
+ metadata[key] = self.walk_metadata(metadata_url + line, headers,
+ expect_json)
+ else:
+ results = self.query_metadata(metadata_url + line, headers,
+ expect_json)
+ if len(results) == 1:
+ metadata[line] = results.pop()
+ else:
+ metadata[line] = results
+ return metadata
+
+ def get_provider_metadata(self, metadata_url, supports_recursive=False,
+ headers=None, expect_json=False):
+ try:
+ if supports_recursive:
+ metadata = self.query_metadata(metadata_url, headers, expect_json)
+ else:
+ metadata = self.walk_metadata(metadata_url, headers, expect_json)
+ except OpenShiftFactsMetadataUnavailableError:
+ metadata = None
+ return metadata
+
+ def get_ansible_facts(self):
+ if not hasattr(self, 'ansible_facts'):
+ self.ansible_facts = ansible_facts(module)
+ return self.ansible_facts
+
+ def guess_host_provider(self):
+ # TODO: cloud provider facts should probably be submitted upstream
+ ansible_facts = self.get_ansible_facts()
+ product_name = ansible_facts['product_name']
+ product_version = ansible_facts['product_version']
+ virt_type = ansible_facts['virtualization_type']
+ virt_role = ansible_facts['virtualization_role']
+ provider = None
+ metadata = None
+
+ # TODO: this is not exposed through module_utils/facts.py in ansible,
+ # need to create PR for ansible to expose it
+ bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
+ if bios_vendor == 'Google':
+ provider = 'gce'
+ metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true'
+ headers = {'Metadata-Flavor': 'Google'}
+ metadata = self.get_provider_metadata(metadata_url, True, headers,
+ True)
+
+ # Filter sshKeys and serviceAccounts from gce metadata
+ if metadata:
+ metadata['project']['attributes'].pop('sshKeys', None)
+ metadata['instance'].pop('serviceAccounts', None)
+ elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
+ provider = 'ec2'
+ metadata_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata = self.get_provider_metadata(metadata_url)
+ elif re.search(r'OpenStack', product_name):
+ provider = 'openstack'
+ metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
+ metadata = self.get_provider_metadata(metadata_url, True, None, True)
+
+ if metadata:
+ ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
+
+ # Filter public_keys and random_seed from openstack metadata
+ metadata.pop('public_keys', None)
+ metadata.pop('random_seed', None)
+
+ if not metadata['ec2_compat']:
+ metadata = None
+
+ return dict(name=provider, metadata=metadata)
+
+ def normalize_provider_facts(self, provider, metadata):
+ if provider is None or metadata is None:
+ return {}
+
+ # TODO: test for ipv6_enabled where possible (gce, aws do not support)
+ # and configure ipv6 facts if available
+
+ # TODO: add support for setting user_data if available
+
+ facts = dict(name=provider, metadata=metadata)
+ network = dict(interfaces=[], ipv6_enabled=False)
+ if provider == 'gce':
+ for interface in metadata['instance']['networkInterfaces']:
+ int_info = dict(ips=[interface['ip']], network_type=provider)
+ int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ]
+ int_info['public_ips'].extend(interface['forwardedIps'])
+ _, _, network_id = interface['network'].rpartition('/')
+ int_info['network_id'] = network_id
+ network['interfaces'].append(int_info)
+ _, _, zone = metadata['instance']['zone'].rpartition('/')
+ facts['zone'] = zone
+ facts['external_id'] = metadata['instance']['id']
+
+ # Default to no sdn for GCE deployments
+ facts['use_openshift_sdn'] = False
+
+ # GCE currently only supports a single interface
+ network['ip'] = network['interfaces'][0]['ips'][0]
+ network['public_ip'] = network['interfaces'][0]['public_ips'][0]
+ network['hostname'] = metadata['instance']['hostname']
+
+ # TODO: attempt to resolve public_hostname
+ network['public_hostname'] = network['public_ip']
+ elif provider == 'ec2':
+ for interface in sorted(metadata['network']['interfaces']['macs'].values(),
+ key=lambda x: x['device-number']):
+ int_info = dict()
+ var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
+ for ips_var, int_var in var_map.iteritems():
+ ips = interface[int_var]
+ int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips
+ int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic'
+ int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None
+ network['interfaces'].append(int_info)
+ facts['zone'] = metadata['placement']['availability-zone']
+ facts['external_id'] = metadata['instance-id']
+
+ # TODO: actually attempt to determine default local and public ips
+ # by using the ansible default ip fact and the ipv4-associations
+ # form the ec2 metadata
+ network['ip'] = metadata['local-ipv4']
+ network['public_ip'] = metadata['public-ipv4']
+
+ # TODO: verify that local hostname makes sense and is resolvable
+ network['hostname'] = metadata['local-hostname']
+
+ # TODO: verify that public hostname makes sense and is resolvable
+ network['public_hostname'] = metadata['public-hostname']
+ elif provider == 'openstack':
+ # openstack ec2 compat api does not support network interfaces and
+ # the version tested on did not include the info in the openstack
+ # metadata api, should be updated if neutron exposes this.
+
+ facts['zone'] = metadata['availability_zone']
+ facts['external_id'] = metadata['uuid']
+ network['ip'] = metadata['ec2_compat']['local-ipv4']
+ network['public_ip'] = metadata['ec2_compat']['public-ipv4']
+
+ # TODO: verify local hostname makes sense and is resolvable
+ network['hostname'] = metadata['hostname']
+
+ # TODO: verify that public hostname makes sense and is resolvable
+ network['public_hostname'] = metadata['ec2_compat']['public-hostname']
+
+ facts['network'] = network
+ return facts
+
+ def init_provider_facts(self):
+ provider_info = self.guess_host_provider()
+ provider_facts = self.normalize_provider_facts(
+ provider_info.get('name'),
+ provider_info.get('metadata')
+ )
+ return provider_facts
+
+ def get_facts(self):
+ # TODO: transform facts into cleaner format (openshift_<blah> instead
+ # of openshift.<blah>
+ return self.facts
+
+ def init_local_facts(self, facts=None):
+ # Avoid the mutable default-argument pitfall; None means "no local facts"
+ if facts is None:
+ facts = {}
+ changed = False
+
+ local_facts = ConfigParser.SafeConfigParser()
+ local_facts.read(self.filename)
+
+ section = self.role
+ if not local_facts.has_section(section):
+ local_facts.add_section(section)
+ changed = True
+
+ for key, value in facts.iteritems():
+ if isinstance(value, bool):
+ value = str(value)
+ if not value:
+ continue
+ if not local_facts.has_option(section, key) or local_facts.get(section, key) != value:
+ local_facts.set(section, key, value)
+ changed = True
+
+ if changed and not module.check_mode:
+ try:
+ fact_dir = os.path.dirname(self.filename)
+ if not os.path.exists(fact_dir):
+ os.makedirs(fact_dir)
+ with open(self.filename, 'w') as fact_file:
+ local_facts.write(fact_file)
+ except (IOError, OSError) as e:
+ raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e))
+ self.changed = changed
+
+ role_facts = dict()
+ for section in local_facts.sections():
+ role_facts[section] = dict()
+ for opt, val in local_facts.items(section):
+ role_facts[section][opt] = val
+ return role_facts
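+ # For reference, a resulting /etc/ansible/facts.d/openshift.fact written
+ # by SafeConfigParser looks like (values illustrative):
+ #
+ #   [common]
+ #   cluster_id = default
+ #   deployment_type = origin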
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec = dict(
+ role=dict(default='common',
+ choices=OpenShiftFacts.known_roles,
+ required=False),
+ local_facts=dict(default={}, type='dict', required=False),
+ ),
+ supports_check_mode=True,
+ add_file_common_args=True,
+ )
+
+ role = module.params['role']
+ local_facts = module.params['local_facts']
+ fact_file = '/etc/ansible/facts.d/openshift.fact'
+
+ openshift_facts = OpenShiftFacts(role, fact_file, local_facts)
+
+ file_params = module.params.copy()
+ file_params['path'] = fact_file
+ file_args = module.load_file_common_arguments(file_params)
+ changed = module.set_fs_attributes_if_different(file_args,
+ openshift_facts.changed)
+
+ return module.exit_json(changed=changed,
+ ansible_facts=openshift_facts.get_facts())
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.facts import *
+from ansible.module_utils.urls import *
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_facts/meta/main.yml b/roles/openshift_facts/meta/main.yml
new file mode 100644
index 000000000..0be3afd24
--- /dev/null
+++ b/roles/openshift_facts/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description: Provides the openshift_facts module
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies: []
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
new file mode 100644
index 000000000..5a7d10d25
--- /dev/null
+++ b/roles/openshift_facts/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Gather OpenShift facts
+ openshift_facts:
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 5a1b889b2..9f9d0a613 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -13,21 +13,24 @@ Role Variables
--------------
From this role:
-| Name | Default value |
-|
-|------------------------------------------|-----------------------|----------------------------------------|
-| openshift_master_manage_service_externally | False | Should the openshift-master role manage the openshift-master service? |
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
-| openshift_node_ips | [] | List of the openshift node ip addresses, that we want to pre-register to the system when openshift-master starts up |
-| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
+| Name | Default value | |
+|-------------------------------------|-----------------------|--------------------------------------------------|
+| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
+| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when openshift-master starts up |
+| openshift_registry_url | UNDEF | Default docker registry to use |
+| openshift_master_api_port | UNDEF | Port the OpenShift API listens on |
+| openshift_master_console_port | UNDEF | Port the web console listens on |
+| openshift_master_api_url | UNDEF | URL for the OpenShift API |
+| openshift_master_console_url | UNDEF | URL for the web console |
+| openshift_master_public_api_url | UNDEF | Public URL for the OpenShift API |
+| openshift_master_public_console_url | UNDEF | Public URL for the web console |
From openshift_common:
-| Name | Default Value | |
-|-------------------------------|---------------------|---------------------|
-| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| Name | Default Value | |
+|-------------------------------|----------------|----------------------------------------|
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_public_ip | UNDEF | Public IP address to use for this host |
+| openshift_hostname | UNDEF | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 0159afbb5..87fb347a8 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,16 +1,17 @@
---
-openshift_master_manage_service_externally: false
-openshift_master_debug_level: "{{ openshift_debug_level | default(0) }}"
openshift_node_ips: []
+
+# TODO: update setting these values based on the facts
+# TODO: update for console port change
os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
-- service: etcd peer
- port: 7001/tcp
- service: OpenShift api https
port: 8443/tcp
-- service: OpenShift web console https
- port: 8444/tcp
os_firewall_deny:
- service: OpenShift api http
port: 8080/tcp
+- service: former OpenShift web console port
+ port: 8444/tcp
+- service: former etcd peer port
+ port: 7001/tcp
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 503d08d41..6fd4dfb51 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,4 +1,3 @@
---
- name: restart openshift-master
service: name=openshift-master state=restarted
- when: not openshift_master_manage_service_externally
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index d5f4776dc..28bdda618 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -1,81 +1,106 @@
---
+# TODO: have api_port, api_use_ssl, console_port, console_use_ssl, and
+# etcd_use_ssl actually change the master config.
+
+- name: Set master OpenShift facts
+ openshift_facts:
+ role: 'master'
+ local_facts:
+ debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
+ api_port: "{{ openshift_master_api_port | default(None) }}"
+ api_url: "{{ openshift_master_api_url | default(None) }}"
+ api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+ public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ console_path: "{{ openshift_master_console_path | default(None) }}"
+ console_port: "{{ openshift_master_console_port | default(None) }}"
+ console_url: "{{ openshift_master_console_url | default(None) }}"
+ console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+ public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
+ etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
+
+# TODO: These values need to be configurable
+- name: Set dns OpenShift facts
+ openshift_facts:
+ role: 'dns'
+ local_facts:
+ ip: "{{ openshift.common.ip }}"
+ domain: local
+
- name: Install OpenShift Master package
yum: pkg=openshift-master state=installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+- name: Create certificate parent directory if it doesn't exist
+ file:
+ path: "{{ openshift_cert_parent_dir }}"
+ state: directory
+
+- name: Create config parent directory if it doesn't exist
+ file:
+ path: "{{ openshift_master_config | dirname }}"
+ state: directory
+
+# TODO: should probably use a template lookup for this
+# TODO: should allow for setting --etcd, --kubernetes options
+# TODO: recreate config if values change
+- name: Use enterprise default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
+
+- name: Use online default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+
+- name: Create master config
+ command: >
+ /usr/bin/openshift start master --write-config
+ --config={{ openshift_master_config }}
+ --portal-net={{ openshift.master.portal_net }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }}
+ {{ ('--images=' ~ openshift_registry_url) if (openshift_registry_url | default('', true) != '') else '' }}
+ {{ ('--nodes=' ~ openshift_node_ips | join(',')) if (openshift_node_ips | default('', true) != '') else '' }}
+ args:
+ chdir: "{{ openshift_cert_parent_dir }}"
+ creates: "{{ openshift_master_config }}"
- name: Configure OpenShift settings
lineinfile:
dest: /etc/sysconfig/openshift-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if
- openshift_node_ips %} --nodes={{ openshift_node_ips
- | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\""
+ line: "OPTIONS=\"--config={{ openshift_master_config }} --loglevel={{ openshift.master.debug_level }}\""
notify:
- restart openshift-master
-- name: Set default registry url
- lineinfile:
- dest: /etc/sysconfig/openshift-master
- regexp: '^IMAGES='
- line: "IMAGES={{ openshift_registry_url }}"
- when: openshift_registry_url is defined
- notify:
- - restart openshift-master
-
-- name: Set master OpenShift facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: master
- option: debug_level
- value: "{{ openshift_master_debug_level }}"
- - section: master
- option: public_ip
- value: "{{ openshift_public_ip }}"
- - section: master
- option: externally_managed
- value: "{{ openshift_master_manage_service_externally }}"
-
-# TODO: remove this when origin PR #1298 has landed in OSE
-- name: Workaround for openshift-master taking longer than 90 seconds to issue sdNotify signal
- command: cp /usr/lib/systemd/system/openshift-master.service /etc/systemd/system/
- args:
- creates: /etc/systemd/system/openshift-master.service
-- ini_file:
- dest: /etc/systemd/system/openshift-master.service
- option: TimeoutStartSec
- section: Service
- value: 300
- state: present
- register: result
-- command: systemctl daemon-reload
- when: result | changed
-# End of workaround pending PR #1298
-
- name: Start and enable openshift-master
service: name=openshift-master enabled=yes state=started
- when: not openshift_master_manage_service_externally
- register: result
-#TODO: remove this when origin PR #1204 has landed in OSE
-- name: need to pause here, otherwise we attempt to copy certificates generated by the master before they are generated
- pause: seconds=30
- when: result | changed
-# End of workaround pending PR #1204
-
-- name: Disable openshift-master if openshift-master is managed externally
- service: name=openshift-master enabled=false
- when: openshift_master_manage_service_externally
-
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
-- name: Create .kube directory
+- name: Create the OpenShift client config dir(s)
file:
- path: /root/.kube
+ path: "~{{ item }}/.config/openshift"
state: directory
mode: 0700
-- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
+ owner: "{{ item }}"
+ group: "{{ item }}"
+ with_items:
+ - root
+ - "{{ ansible_ssh_user }}"
+
+# TODO: Update this file if the contents of the source file are not present in
+# the dest file, will need to make sure to ignore things that could be added
+- name: Create the OpenShift client config(s)
+ command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig ~{{ item }}/.config/openshift/.config
args:
- creates: /root/.kube/.kubeconfig
+ creates: ~{{ item }}/.config/openshift/.config
+ with_items:
+ - root
+ - "{{ ansible_ssh_user }}"
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index 9a8c4bba2..c52d957ac 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -1,2 +1,5 @@
---
-openshift_host_type: master
+openshift_master_config: /etc/openshift/master.yaml
+openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
+openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
+openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 9210bab16..83359f164 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -16,20 +16,15 @@ Role Variables
From this role:
| Name | Default value | |
|------------------------------------------|-----------------------|----------------------------------------|
-| openshift_node_manage_service_externally | False | Should the openshift-node role manage the openshift-node service? |
| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node |
-| openshift_master_public_ips | UNDEF (Required) | List of the public IPs for the openhift-master hosts |
-| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
-| openshift_node_resources | { capacity: { cpu: , memory: } } | Resource specification for this node, cpu is the number of CPUs to advertise and memory is the amount of memory in bytes to advertise. Default values chosen when not set are the number of logical CPUs for the host and 75% of total system memory |
From openshift_common:
| Name | Default Value | |
|-------------------------------|---------------------|---------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index c45524f16..df7ec41b6 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,10 +1,4 @@
---
-openshift_node_manage_service_externally: false
-openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}"
os_firewall_allow:
- service: OpenShift kubelet
port: 10250/tcp
-openshift_node_resources:
- capacity:
- cpu:
- memory:
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index f7aa36d88..ca2992637 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,4 +1,4 @@
---
- name: restart openshift-node
service: name=openshift-node state=restarted
- when: not openshift_node_manage_service_externally
+ when: not openshift.common.use_openshift_sdn|bool
diff --git a/roles/openshift_node/library/openshift_register_node.py b/roles/openshift_node/library/openshift_register_node.py
deleted file mode 100644
index 63079e59b..000000000
--- a/roles/openshift_node/library/openshift_register_node.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import os
-import multiprocessing
-import socket
-from subprocess import check_output, Popen
-
-DOCUMENTATION = '''
----
-module: openshift_register_node
-short_description: This module registers an openshift-node with an openshift-master
-author: Jason DeTiberus
-requirements: [ openshift-node ]
-notes: Node resources can be specified using either the resources option or the following options: cpu, memory
-options:
- name:
- description:
- - id for this node (usually the node fqdn)
- required: true
- hostIP:
- description:
- - ip address for this node
- required: false
- cpu:
- description:
- - number of CPUs for this node
- required: false
- default: number of logical CPUs detected
- memory:
- description:
- - Memory available for this node in bytes
- required: false
- default: 80% MemTotal
- resources:
- description:
- - A json string representing Node resources
- required: false
-'''
-EXAMPLES = '''
-# Minimal node registration
-- openshift_register_node: name=ose3.node.example.com
-
-# Node registration with all options (using cpu and memory options)
-- openshift_register_node:
- name: ose3.node.example.com
- hostIP: 192.168.1.1
- apiVersion: v1beta1
- cpu: 1
- memory: 1073741824
-
-# Node registration with all options (using resources option)
-- openshift_register_node:
- name: ose3.node.example.com
- hostIP: 192.168.1.1
- apiVersion: v1beta1
- resources:
- capacity:
- cpu: 1
- memory: 1073741824
-'''
-
-def main():
- module = AnsibleModule(
- argument_spec = dict(
- name = dict(required = True),
- hostIP = dict(),
- apiVersion = dict(),
- cpu = dict(),
- memory = dict(),
- resources = dict(),
- client_config = dict(),
- client_cluster = dict(default = 'master'),
- client_context = dict(default = 'master'),
- client_user = dict(default = 'admin')
- ),
- mutually_exclusive = [
- ['resources', 'cpu'],
- ['resources', 'memory']
- ],
- supports_check_mode=True
- )
-
- user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
- if not (user_has_client_config or module.params['client_config']):
- module.fail_json(msg="Could not locate client configuration, "
- "client_config must be specified if "
- "~/.kube/.kubeconfig is not present")
-
- client_opts = []
- if module.params['client_config']:
- client_opts.append("--kubeconfig=%s" % module.params['client_config'])
-
- try:
- output = check_output(["/usr/bin/openshift", "ex", "config", "view",
- "-o", "json"] + client_opts,
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- module.fail_json(msg="Failed to get client configuration",
- command=e.cmd, returncode=e.returncode, output=e.output)
-
- config = json.loads(output)
- if not (bool(config['clusters']) or bool(config['contexts']) or
- bool(config['current-context']) or bool(config['users'])):
- module.fail_json(msg="Client config missing required values",
- output=output)
-
- client_context = module.params['client_context']
- if client_context:
- config_context = next((context for context in config['contexts']
- if context['name'] == client_context), None)
- if not config_context:
- module.fail_json(msg="Context %s not found in client config" %
- client_context)
- if not config['current-context'] or config['current-context'] != client_context:
- client_opts.append("--context=%s" % client_context)
-
- client_user = module.params['client_user']
- if client_user:
- config_user = next((user for user in config['users']
- if user['name'] == client_user), None)
- if not config_user:
- module.fail_json(msg="User %s not found in client config" %
- client_user)
- if client_user != config_context['context']['user']:
- client_opts.append("--user=%s" % client_user)
-
- client_cluster = module.params['client_cluster']
- if client_cluster:
- config_cluster = next((cluster for cluster in config['clusters']
- if cluster['name'] == client_cluster), None)
- if not client_cluster:
- module.fail_json(msg="Cluster %s not found in client config" %
- client_cluster)
- if client_cluster != config_context['context']['cluster']:
- client_opts.append("--cluster=%s" % client_cluster)
-
- node_def = dict(
- id = module.params['name'],
- kind = 'Node',
- apiVersion = 'v1beta1',
- resources = dict(
- capacity = dict()
- )
- )
-
- for key, value in module.params.iteritems():
- if key in ['cpu', 'memory']:
- node_def['resources']['capacity'][key] = value
- elif key == 'name':
- node_def['id'] = value
- elif key != 'client_config':
- if value:
- node_def[key] = value
-
- if not node_def['resources']['capacity']['cpu']:
- node_def['resources']['capacity']['cpu'] = multiprocessing.cpu_count()
-
- if not node_def['resources']['capacity']['memory']:
- with open('/proc/meminfo', 'r') as mem:
- for line in mem:
- entries = line.split()
- if str(entries.pop(0)) == 'MemTotal:':
- mem_total_kb = int(entries.pop(0))
- mem_capacity = int(mem_total_kb * 1024 * .75)
- node_def['resources']['capacity']['memory'] = mem_capacity
- break
-
- try:
- output = check_output(["/usr/bin/osc", "get", "nodes"] + client_opts,
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- module.fail_json(msg="Failed to get node list", command=e.cmd,
- returncode=e.returncode, output=e.output)
-
- if re.search(module.params['name'], output, re.MULTILINE):
- module.exit_json(changed=False, node_def=node_def)
- elif module.check_mode:
- module.exit_json(changed=True, node_def=node_def)
-
- config_def = dict(
- metadata = dict(
- name = "add-node-%s" % module.params['name']
- ),
- kind = 'Config',
- apiVersion = 'v1beta1',
- items = [node_def]
- )
-
- p = Popen(["/usr/bin/osc"] + client_opts + ["create", "node"] + ["-f", "-"],
- stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, close_fds=True)
- (out, err) = p.communicate(module.jsonify(config_def))
- ret = p.returncode
-
- if ret != 0:
- if re.search("minion \"%s\" already exists" % module.params['name'],
- err):
- module.exit_json(changed=False,
- msg="node definition already exists", config_def=config_def)
- else:
- module.fail_json(msg="Node creation failed.", ret=ret, out=out,
- err=err, config_def=config_def)
-
- module.exit_json(changed=True, out=out, err=err, ret=ret,
- node_def=config_def)
-
-# import module snippets
-from ansible.module_utils.basic import *
-main()
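For reference, the resource fallback in the module deleted above (its DOCUMENTATION claimed 80% of MemTotal while the code used 75%) boils down to the following self-contained sketch; the replacement role reads cpu/memory from facts instead:

```python
import multiprocessing

def default_node_capacity(meminfo='/proc/meminfo'):
    # All logical CPUs, plus 75% of MemTotal converted from kB to bytes,
    # mirroring the logic of the deleted openshift_register_node module.
    capacity = {'cpu': multiprocessing.cpu_count(), 'memory': None}
    with open(meminfo) as mem:
        for line in mem:
            fields = line.split()
            if fields and fields[0] == 'MemTotal:':
                capacity['memory'] = int(int(fields[1]) * 1024 * 0.75)
                break
    return capacity

print(default_node_capacity())
```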
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 6721c7401..3d56bdd67 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,78 +1,50 @@
---
+# TODO: allow for overriding default ports where possible
+# TODO: trigger the external service when restart is needed
+
+- name: Set node OpenShift facts
+ openshift_facts:
+ role: 'node'
+ local_facts:
+ debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
+
+- name: Test if node certs and config exist
+ stat: path={{ item }}
+ failed_when: not result.stat.exists
+ register: result
+ with_items:
+ - "{{ openshift_node_cert_dir }}"
+ - "{{ openshift_node_cert_dir }}/ca.crt"
+ - "{{ openshift_node_cert_dir }}/client.crt"
+ - "{{ openshift_node_cert_dir }}/client.key"
+ - "{{ openshift_node_cert_dir }}/.kubeconfig"
+ - "{{ openshift_node_cert_dir }}/node-config.yaml"
+ - "{{ openshift_node_cert_dir }}/server.crt"
+ - "{{ openshift_node_cert_dir }}/server.key"
+
- name: Install OpenShift Node package
yum: pkg=openshift-node state=installed
+ register: install_result
-- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
-- name: Retrieve OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ openshift_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }}
- ignore_errors: yes
-
-- file: path=/var/lib/openshift/openshift.local.certificates/admin state=directory
-
-- name: Store OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ openshift_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin
- ignore_errors: yes
-
-- local_action: file name={{ mktemp.stdout }} state=absent
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+# --create-certs=false is a temporary workaround until
+# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
+# the default for nodes
- name: Configure OpenShift Node settings
lineinfile:
dest: /etc/sysconfig/openshift-node
regexp: '^OPTIONS='
- line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --loglevel={{ openshift_node_debug_level }}\""
+ line: "OPTIONS=\"--loglevel={{ openshift.node.debug_level }} --config={{ openshift_node_cert_dir }}/node-config.yaml\""
notify:
- restart openshift-node
-- name: Set default registry url
- lineinfile:
- dest: /etc/sysconfig/openshift-node
- regexp: '^IMAGES='
- line: "IMAGES={{ openshift_registry_url }}"
- when: openshift_registry_url is defined
- notify:
- - restart openshift-node
-
-- name: Set OpenShift node facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: node
- option: debug_level
- value: "{{ openshift_node_debug_level }}"
- - section: node
- option: public_ip
- value: "{{ openshift_public_ip }}"
- - section: node
- option: externally_managed
- value: "{{ openshift_node_manage_service_externally }}"
-
-# fixme: Once the openshift_cluster playbook is published state should be started
-# Always bounce service to pick up new credentials
- name: Start and enable openshift-node
- service: name=openshift-node enabled=yes state=restarted
- when: not openshift_node_manage_service_externally
+ service: name=openshift-node enabled=yes state=started
+ when: not openshift.common.use_openshift_sdn|bool
- name: Disable openshift-node if openshift-node is managed externally
service: name=openshift-node enabled=false
- when: openshift_node_manage_service_externally
-
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
-- name: Create .kube directory
- file:
- path: /root/.kube
- state: directory
- mode: 0700
-- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
- args:
- creates: /root/.kube/.kubeconfig
-
-- name: Register node (if not already registered)
- openshift_register_node:
- name: "{{ openshift_hostname }}"
- resources: "{{ openshift_node_resources }}"
+ when: openshift.common.use_openshift_sdn|bool
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
index 9841d52f9..c6be83139 100644
--- a/roles/openshift_node/vars/main.yml
+++ b/roles/openshift_node/vars/main.yml
@@ -1,2 +1,2 @@
---
-openshift_host_type: node
+openshift_node_cert_dir: /etc/openshift/node
diff --git a/roles/openshift_register_nodes/README.md b/roles/openshift_register_nodes/README.md
new file mode 100644
index 000000000..b96faa044
--- /dev/null
+++ b/roles/openshift_register_nodes/README.md
@@ -0,0 +1,34 @@
+OpenShift Register Nodes
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml
new file mode 100644
index 000000000..a0befab44
--- /dev/null
+++ b/roles/openshift_register_nodes/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_kube_api_version: v1beta1
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
new file mode 100755
index 000000000..afa9eb27d
--- /dev/null
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -0,0 +1,601 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# disable pylint checks
+# temporarily disabled until items can be addressed:
+# fixme - until all TODO comments have been addressed
+# permanently disabled unless someone wants to refactor the object model:
+# too-few-public-methods
+# no-self-use
+# too-many-arguments
+# too-many-locals
+# too-many-branches
+# pylint:disable=fixme, too-many-arguments, no-self-use
+# pylint:disable=too-many-locals, too-many-branches, too-few-public-methods
+"""Ansible module to register a kubernetes node to the cluster"""
+
+import os
+
+DOCUMENTATION = '''
+---
+module: kubernetes_register_node
+short_description: Registers a kubernetes node with a master
+description:
+ - Registers a kubernetes node with a master
+options:
+ name:
+ default: null
+ description:
+ - Identifier for this node (usually the node fqdn).
+ required: true
+ api_version:
+ choices: ['v1beta1', 'v1beta3']
+ default: 'v1beta1'
+ description:
+ - Kubernetes API version to use
+ required: false
+ host_ip:
+ default: null
+ description:
+ - IP Address to associate with the node when registering.
+ Available in the following API versions: v1beta1.
+ required: false
+ hostnames:
+ default: []
+ description:
+ - Valid hostnames for this node. Available in the following API
+ versions: v1beta3.
+ required: false
+ external_ips:
+ default: []
+ description:
+ - External IP Addresses for this node. Available in the following API
+ versions: v1beta3.
+ required: false
+ internal_ips:
+ default: []
+ description:
+ - Internal IP Addresses for this node. Available in the following API
+ versions: v1beta3.
+ required: false
+ cpu:
+ default: null
+ description:
+ - Number of CPUs to allocate for this node. When using the v1beta1
+ API, you must specify the CPU count as a floating point number
+ with no more than 3 decimal places. API version v1beta3 and newer
+ accepts arbitrary float values.
+ required: false
+ memory:
+ default: null
+ description:
+ - Memory available for this node. When using the v1beta1 API, you
+ must specify the memory size in bytes. API version v1beta3 and
+ newer accepts binary SI and decimal SI values.
+ required: false
+'''
+EXAMPLES = '''
+# Minimal node registration
+- kubernetes_register_node: name=ose3.node.example.com
+
+# Node registration using the v1beta1 API and assigning 1 CPU core and 500 MB
+# of memory
+- kubernetes_register_node:
+ name: ose3.node.example.com
+ api_version: v1beta1
+ host_ip: 192.168.1.1
+ cpu: 1
+ memory: 500000000
+
+# Node registration using the v1beta3 API, setting an alternate hostname,
+# internal and external IPs, and assigning 3.5 CPU cores and 1 TiB of memory
+- kubernetes_register_node:
+ name: ose3.node.example.com
+ api_version: v1beta3
+ external_ips: ['192.168.1.5']
+ internal_ips: ['10.0.0.5']
+ hostnames: ['ose2.node.internal.local']
+ cpu: 3.5
+ memory: 1Ti
+'''
+
+
+class ClientConfigException(Exception):
+ """Client Configuration Exception"""
+ pass
+
+class ClientConfig(object):
+ """ Representation of a client config
+
+ Attributes:
+ config (dict): dictionary representing the client configuration
+
+ Args:
+ client_opts (list of str): client options to use
+ module (AnsibleModule): module instance used to run the kubectl command
+
+ Raises:
+ ClientConfigException: when the client config is missing required values
+ """
+ def __init__(self, client_opts, module):
+ kubectl = module.params['kubectl_cmd']
+ _, output, _ = module.run_command((kubectl +
+ ["config", "view", "-o", "json"] +
+ client_opts), check_rc=True)
+ self.config = json.loads(output)
+
+ if not (bool(self.config['clusters']) or
+ bool(self.config['contexts']) or
+ bool(self.config['current-context']) or
+ bool(self.config['users'])):
+ raise ClientConfigException(
+ "Client config missing required values: %s" % output
+ )
+
+ def current_context(self):
+ """ Gets the current context for the client config
+
+ Returns:
+ str: The current context as set in the config
+ """
+ return self.config['current-context']
+
+ def section_has_value(self, section_name, value):
+ """ Test if specified section contains a value
+
+ Args:
+ section_name (str): config section to test
+ value (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
+ section = self.config[section_name]
+ if isinstance(section, dict):
+ return value in section
+ else:
+ val = next((item for item in section
+ if item['name'] == value), None)
+ return val is not None
+
+ def has_context(self, context):
+ """ Test if specified context exists in config
+
+ Args:
+ context (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
+ return self.section_has_value('contexts', context)
+
+ def has_user(self, user):
+ """ Test if specified user exists in config
+
+ Args:
+ user (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
+ return self.section_has_value('users', user)
+
+ def has_cluster(self, cluster):
+ """ Test if specified cluster exists in config
+
+ Args:
+ cluster (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
+ return self.section_has_value('clusters', cluster)
+
+ def get_value_for_context(self, context, attribute):
+ """ Get the value of attribute in context
+
+ Args:
+ context (str): context to search
+ attribute (str): attribute wanted
+ Returns:
+ str: The value for attribute in context
+ """
+ contexts = self.config['contexts']
+ if isinstance(contexts, dict):
+ return contexts[context][attribute]
+ else:
+ return next((c['context'][attribute] for c in contexts
+ if c['name'] == context), None)
+
+ def get_user_for_context(self, context):
+ """ Get the user attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
+ return self.get_value_for_context(context, 'user')
+
+ def get_cluster_for_context(self, context):
+ """ Get the cluster attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
+ return self.get_value_for_context(context, 'cluster')
+
+ def get_namespace_for_context(self, context):
+ """ Get the namespace attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
+ return self.get_value_for_context(context, 'namespace')
+
+class Util(object):
+ """Utility methods"""
+ @staticmethod
+ def remove_empty_elements(mapping):
+ """ Recursively removes empty elements from a dict
+
+ Args:
+ mapping (dict): dict to remove empty attributes from
+ Returns:
+ dict: A copy of the dict with empty elements removed
+ """
+ if isinstance(mapping, dict):
+ copy = mapping.copy()
+ for key, val in mapping.iteritems():
+ if not val:
+ del copy[key]
+ return copy
+ else:
+ return mapping
+
+class NodeResources(object):
+ """ Kubernetes Node Resources
+
+ Attributes:
+ resources (dict): A dictionary representing the node resources
+
+ Args:
+ version (str): kubernetes api version
+ cpu (str): string representation of the cpu resources for the node
+ memory (str): string representation of the memory resources for the
+ node
+ """
+ def __init__(self, version, cpu=None, memory=None):
+ if version == 'v1beta1':
+ self.resources = dict(capacity=dict())
+ self.resources['capacity']['cpu'] = cpu
+ self.resources['capacity']['memory'] = memory
+
+ def get_resources(self):
+ """ Get the dict representing the node resources
+
+ Returns:
+ dict: representation of the node resources with any empty
+ elements removed
+ """
+ return Util.remove_empty_elements(self.resources)
+
+class NodeSpec(object):
+ """ Kubernetes Node Spec
+
+ Attributes:
+ spec (dict): A dictionary representing the node spec
+
+ Args:
+ version (str): kubernetes api version
+ cpu (str): string representation of the cpu resources for the node
+ memory (str): string representation of the memory resources for the
+ node
+ cidr (str): string representation of the cidr block available for
+ the node
+ externalID (str): The external id of the node
+ """
+ def __init__(self, version, cpu=None, memory=None, cidr=None,
+ externalID=None):
+ if version == 'v1beta3':
+ self.spec = dict(podCIDR=cidr, externalID=externalID,
+ capacity=dict())
+ self.spec['capacity']['cpu'] = cpu
+ self.spec['capacity']['memory'] = memory
+
+ def get_spec(self):
+ """ Get the dict representing the node spec
+
+ Returns:
+ dict: representation of the node spec with any empty elements
+ removed
+ """
+ return Util.remove_empty_elements(self.spec)
+
+class NodeStatus(object):
+ """ Kubernetes Node Status
+
+ Attributes:
+ status (dict): A dictionary representing the node status
+
+ Args:
+ version (str): kubernetes api version
+ externalIPs (list, optional): externalIPs for the node
+ internalIPs (list, optional): internalIPs for the node
+ hostnames (list, optional): hostnames for the node
+ """
+ def add_addresses(self, address_type, addresses):
+ """ Adds addresses of the specified type
+
+ Args:
+ address_type (str): address type
+ addresses (list): addresses to add
+ """
+ address_list = []
+ for address in addresses:
+ address_list.append(dict(type=address_type, address=address))
+ return address_list
+
+ def __init__(self, version, externalIPs=None, internalIPs=None,
+ hostnames=None):
+ if version == 'v1beta3':
+ addresses = []
+ if externalIPs is not None:
+ addresses += self.add_addresses('ExternalIP', externalIPs)
+ if internalIPs is not None:
+ addresses += self.add_addresses('InternalIP', internalIPs)
+ if hostnames is not None:
+ addresses += self.add_addresses('Hostname', hostnames)
+
+ self.status = dict(addresses=addresses)
+
+ def get_status(self):
+ """ Get the dict representing the node status
+
+ Returns:
+ dict: representation of the node status with any empty elements
+ removed
+ """
+ return Util.remove_empty_elements(self.status)
+
+class Node(object):
+ """ Kubernetes Node
+
+ Attributes:
+ node (dict): A dictionary representing the node
+
+ Args:
+ module (AnsibleModule):
+ client_opts (list): client connection options
+ version (str, optional): kubernetes api version
+ node_name (str, optional): name for node
+ hostIP (str, optional): node host ip
+ hostnames (list, optional): hostnames for the node
+ externalIPs (list, optional): externalIPs for the node
+ internalIPs (list, optional): internalIPs for the node
+ cpu (str, optional): cpu resources for the node
+ memory (str, optional): memory resources for the node
+ labels (list, optional): labels for the node
+ annotations (list, optional): annotations for the node
+ podCIDR (list, optional): cidr block to use for pods
+ externalID (str, optional): external id of the node
+ """
+ def __init__(self, module, client_opts, version='v1beta1', node_name=None,
+ hostIP=None, hostnames=None, externalIPs=None,
+ internalIPs=None, cpu=None, memory=None, labels=None,
+ annotations=None, podCIDR=None, externalID=None):
+ self.module = module
+ self.client_opts = client_opts
+ if version == 'v1beta1':
+ self.node = dict(id=node_name,
+ kind='Node',
+ apiVersion=version,
+ hostIP=hostIP,
+ resources=NodeResources(version, cpu, memory),
+ cidr=podCIDR,
+ labels=labels,
+ annotations=annotations,
+ externalID=externalID)
+ elif version == 'v1beta3':
+ metadata = dict(name=node_name,
+ labels=labels,
+ annotations=annotations)
+ self.node = dict(kind='Node',
+ apiVersion=version,
+ metadata=metadata,
+ spec=NodeSpec(version, cpu, memory, podCIDR,
+ externalID),
+ status=NodeStatus(version, externalIPs,
+ internalIPs, hostnames))
+
+ def get_name(self):
+ """ Get the name for the node
+
+ Returns:
+ str: node name
+ """
+ if self.node['apiVersion'] == 'v1beta1':
+ return self.node['id']
+ elif self.node['apiVersion'] == 'v1beta3':
+ return self.node['metadata']['name']
+
+ def get_node(self):
+ """ Get the dict representing the node
+
+ Returns:
+ dict: representation of the node with any empty elements
+ removed
+ """
+ node = self.node.copy()
+ if self.node['apiVersion'] == 'v1beta1':
+ node['resources'] = self.node['resources'].get_resources()
+ elif self.node['apiVersion'] == 'v1beta3':
+ node['spec'] = self.node['spec'].get_spec()
+ node['status'] = self.node['status'].get_status()
+ return Util.remove_empty_elements(node)
+
+ def exists(self):
+ """ Tests if the node already exists
+
+ Returns:
+ bool: True if node exists, otherwise False
+ """
+ kubectl = self.module.params['kubectl_cmd']
+ _, output, _ = self.module.run_command((kubectl + ["get", "nodes"] +
+ self.client_opts),
+ check_rc=True)
+ if re.search(self.module.params['name'], output, re.MULTILINE):
+ return True
+ return False
+
+ def create(self):
+ """ Creates the node
+
+ Returns:
+ bool: True if node creation successful
+ """
+ kubectl = self.module.params['kubectl_cmd']
+ cmd = kubectl + self.client_opts + ['create', '-f', '-']
+ exit_code, output, error = self.module.run_command(
+ cmd, data=self.module.jsonify(self.get_node())
+ )
+ if exit_code != 0:
+ if re.search("minion \"%s\" already exists" % self.get_name(),
+ error):
+ self.module.exit_json(msg="node definition already exists",
+ changed=False, node=self.get_node())
+ else:
+ self.module.fail_json(msg="Node creation failed.",
+ exit_code=exit_code,
+ output=output, error=error,
+ node=self.get_node())
+ else:
+ return True
+
+def main():
+ """ main """
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str'),
+ host_ip=dict(type='str'),
+ hostnames=dict(type='list', default=[]),
+ external_ips=dict(type='list', default=[]),
+ internal_ips=dict(type='list', default=[]),
+ api_version=dict(type='str', default='v1beta1',
+ choices=['v1beta1', 'v1beta3']),
+ cpu=dict(type='str'),
+ memory=dict(type='str'),
+ # TODO: needs to be documented
+ labels=dict(type='dict', default={}),
+ # TODO: needs to be documented
+ annotations=dict(type='dict', default={}),
+ # TODO: needs to be documented
+ pod_cidr=dict(type='str'),
+ # TODO: needs to be documented
+ external_id=dict(type='str'),
+ # TODO: needs to be documented
+ client_config=dict(type='str'),
+ # TODO: needs to be documented
+ client_cluster=dict(type='str', default='master'),
+ # TODO: needs to be documented
+ client_context=dict(type='str', default='default'),
+ # TODO: needs to be documented
+ client_namespace=dict(type='str', default='default'),
+ # TODO: needs to be documented
+ client_user=dict(type='str', default='system:openshift-client'),
+ # TODO: needs to be documented
+ kubectl_cmd=dict(type='list', default=['kubectl']),
+ # TODO: needs to be documented
+ kubeconfig_flag=dict(type='str'),
+ # TODO: needs to be documented
+ default_client_config=dict(type='str')
+ ),
+ mutually_exclusive=[
+ ['host_ip', 'external_ips'],
+ ['host_ip', 'internal_ips'],
+ ['host_ip', 'hostnames'],
+ ],
+ supports_check_mode=True
+ )
+
+ client_config = '~/.kube/.kubeconfig'
+ if module.params['default_client_config']:
+ client_config = module.params['default_client_config']
+ user_has_client_config = os.path.exists(os.path.expanduser(client_config))
+ if not (user_has_client_config or module.params['client_config']):
+ module.fail_json(msg="Could not locate client configuration, "
+ "client_config must be specified if "
+ "~/.kube/.kubeconfig is not present")
+
+ client_opts = []
+ if module.params['client_config']:
+ kubeconfig_flag = '--kubeconfig'
+ if module.params['kubeconfig_flag']:
+ kubeconfig_flag = module.params['kubeconfig_flag']
+ client_opts.append(kubeconfig_flag + '=' +
+ os.path.expanduser(module.params['client_config']))
+
+ try:
+ config = ClientConfig(client_opts, module)
+ except ClientConfigException as ex:
+ module.fail_json(msg="Failed to get client configuration",
+ exception=str(ex))
+
+ client_context = module.params['client_context']
+ if config.has_context(client_context):
+ if client_context != config.current_context():
+ client_opts.append("--context=%s" % client_context)
+ else:
+ module.fail_json(msg="Context %s not found in client config" %
+ client_context)
+
+ client_user = module.params['client_user']
+ if config.has_user(client_user):
+ if client_user != config.get_user_for_context(client_context):
+ client_opts.append("--user=%s" % client_user)
+ else:
+ module.fail_json(msg="User %s not found in client config" %
+ client_user)
+
+ client_cluster = module.params['client_cluster']
+ if config.has_cluster(client_cluster):
+ if client_cluster != config.get_cluster_for_context(client_context):
+ client_opts.append("--cluster=%s" % client_cluster)
+ else:
+ module.fail_json(msg="Cluster %s not found in client config" %
+ client_cluster)
+
+ client_namespace = module.params['client_namespace']
+ if client_namespace != config.get_namespace_for_context(client_context):
+ client_opts.append("--namespace=%s" % client_namespace)
+
+ node = Node(module, client_opts, module.params['api_version'],
+ module.params['name'], module.params['host_ip'],
+ module.params['hostnames'], module.params['external_ips'],
+ module.params['internal_ips'], module.params['cpu'],
+ module.params['memory'], module.params['labels'],
+ module.params['annotations'], module.params['pod_cidr'],
+ module.params['external_id'])
+
+ # TODO: attempt to support changing node settings where possible and/or
+ # modifying node resources
+ if node.exists():
+ module.exit_json(changed=False, node=node.get_node())
+ elif module.check_mode:
+ module.exit_json(changed=True, node=node.get_node())
+ else:
+ if node.create():
+ module.exit_json(changed=True,
+ msg="Node created successfully",
+ node=node.get_node())
+ else:
+ module.fail_json(msg="Unknown error creating node",
+ node=node.get_node())
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
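Worth noting about the module above: Util.remove_empty_elements prunes only the top level of the mapping, and the nested sections are pruned separately by get_resources/get_spec/get_status before get_node runs the final pass. A standalone restatement of that pruning (not an import of the module):

```python
def remove_empty_elements(mapping):
    # Top-level pruning only, as in Util.remove_empty_elements above.
    if isinstance(mapping, dict):
        return {key: val for key, val in mapping.items() if val}
    return mapping

node = {'id': 'ose3.node.example.com', 'kind': 'Node', 'apiVersion': 'v1beta1',
        'hostIP': None, 'labels': {},
        'resources': {'capacity': {'cpu': '1'}}}
print(remove_empty_elements(node))
# {'id': 'ose3.node.example.com', 'kind': 'Node', 'apiVersion': 'v1beta1',
#  'resources': {'capacity': {'cpu': '1'}}}
```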
diff --git a/roles/openshift_register_nodes/meta/main.yml b/roles/openshift_register_nodes/meta/main.yml
new file mode 100644
index 000000000..e40a152c1
--- /dev/null
+++ b/roles/openshift_register_nodes/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_facts }
+
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
new file mode 100644
index 000000000..d4d72d126
--- /dev/null
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -0,0 +1,56 @@
+---
+# TODO: support new create-config command to generate node certs and config
+# TODO: recreate master/node configs if settings that affect the configs
+# change (hostname, public_hostname, ip, public_ip, etc)
+
+
+# TODO: use a template lookup here
+# TODO: create a failed_when condition
+- name: Use enterprise default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
+
+- name: Use online default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+
+- name: Create node config
+ command: >
+ /usr/bin/openshift admin create-node-config
+ --node-dir={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}
+ --node={{ item.openshift.common.hostname }}
+ --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
+ --dns-domain={{ openshift.dns.domain }}
+ --dns-ip={{ openshift.dns.ip }}
+ --master={{ openshift.master.api_url }}
+ --signer-key={{ openshift_master_ca_key }}
+ --signer-cert={{ openshift_master_ca_cert }}
+ --certificate-authority={{ openshift_master_ca_cert }}
+ --signer-serial={{ openshift_master_ca_dir }}/serial.txt
+ --node-client-certificate-authority={{ openshift_master_ca_cert }}
+ {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }}
+ --listen=https://0.0.0.0:10250
+ args:
+ chdir: "{{ openshift_cert_parent_dir }}"
+ creates: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
+ with_items: openshift_nodes
+
+- name: Register unregistered nodes
+ kubernetes_register_node:
+ kubectl_cmd: ['osc']
+ default_client_config: '~/.config/openshift/.config'
+ name: "{{ item.openshift.common.hostname }}"
+ api_version: "{{ openshift_kube_api_version }}"
+ cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"
+ memory: "{{ item.openshift.node.resources_memory | default(None) }}"
+ pod_cidr: "{{ item.openshift.node.pod_cidr | default(None) }}"
+ host_ip: "{{ item.openshift.common.ip }}"
+ labels: "{{ item.openshift.node.labels | default({}) }}"
+ annotations: "{{ item.openshift.node.annotations | default({}) }}"
+ external_id: "{{ item.openshift.node.external_id }}"
+ # TODO: support customizing other attributes such as: client_config,
+ # client_cluster, client_context, client_user
+ with_items: openshift_nodes
+ register: register_result
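The --hostnames flag in the Create node config task relies on unique|join to collapse hostname and public_hostname when they are identical. A standalone check (assumes Jinja2 >= 2.10 for the built-in unique filter; Ansible ships its own version):

```python
from jinja2 import Template

t = Template('--hostnames={{ [hostname, public_hostname] | unique | join(",") }}')
# Identical names collapse to a single entry.
print(t.render(hostname='node1.example.com', public_hostname='node1.example.com'))
# --hostnames=node1.example.com
# Distinct names are both kept, in order.
print(t.render(hostname='node1.internal', public_hostname='node1.example.com'))
# --hostnames=node1.internal,node1.example.com
```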
diff --git a/roles/openshift_register_nodes/vars/main.yml b/roles/openshift_register_nodes/vars/main.yml
new file mode 100644
index 000000000..bd497f08f
--- /dev/null
+++ b/roles/openshift_register_nodes/vars/main.yml
@@ -0,0 +1,7 @@
+---
+openshift_cert_parent_dir: /var/lib/openshift
+openshift_cert_relative_dir: openshift.local.certificates
+openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
+openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
+openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
+openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
new file mode 100644
index 000000000..6bbedd839
--- /dev/null
+++ b/roles/openshift_repos/README.md
@@ -0,0 +1,38 @@
+OpenShift Repos
+================
+
+Configures repositories for an OpenShift installation
+
+Requirements
+------------
+
+A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-beta-rpms repos.
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|-------------------------------|---------------|----------------------------------------------|
+| openshift_deployment_type | None | Possible values: enterprise, origin, online |
+| openshift_additional_repos | {} | TODO |
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO
diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml
new file mode 100644
index 000000000..7c5a14cd7
--- /dev/null
+++ b/roles/openshift_repos/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+openshift_additional_repos: {}
diff --git a/roles/openshift_repos/files/online/repos/enterprise-v3.repo b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
new file mode 100644
index 000000000..d324c142a
--- /dev/null
+++ b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
@@ -0,0 +1,10 @@
+[enterprise-v3]
+name=OpenShift Enterprise Beta3
+baseurl=https://gce-mirror1.ops.rhcloud.com/libra/libra-7-ose-beta3/
+ https://mirror.ops.rhcloud.com/libra/libra-7-ose-beta3/
+enabled=1
+gpgcheck=0
+failovermethod=priority
+sslverify=False
+sslclientcert=/var/lib/yum/client-cert.pem
+sslclientkey=/var/lib/yum/client-key.pem
\ No newline at end of file
diff --git a/roles/repos/files/online/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
index b4215679f..b4215679f 100644
--- a/roles/repos/files/online/rhel-7-libra-candidate.repo
+++ b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
diff --git a/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo
new file mode 100644
index 000000000..0b21e0a65
--- /dev/null
+++ b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo
@@ -0,0 +1,7 @@
+[maxamillion-origin-next]
+name=Copr repo for origin-next owned by maxamillion
+baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg
+enabled=1
diff --git a/roles/openshift_repos/files/removed/repos/epel7-openshift.repo b/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
diff --git a/roles/openshift_repos/meta/main.yml b/roles/openshift_repos/meta/main.yml
new file mode 100644
index 000000000..0558b822c
--- /dev/null
+++ b/roles/openshift_repos/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: TODO
+ description: OpenShift Repositories
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 43786da41..12e98b7a1 100644
--- a/roles/repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -1,12 +1,19 @@
---
# TODO: Add flag for enabling EPEL repo, default to false
+# TODO: Add subscription-management config, with parameters
+# for username, password, poolid(name), and official repos to
+# enable/disable. Might need to make a module that extends the
+# subscription management module to take a poolid and enable/disable the
+# proper repos correctly.
+
- assert:
that: openshift_deployment_type in known_openshift_deployment_types
-# TODO: remove this when origin support actually works
-- fail: msg="OpenShift Origin support is not currently enabled"
- when: openshift_deployment_type == 'origin'
+- name: Ensure libselinux-python is installed
+ yum:
+ pkg: libselinux-python
+ state: present
- name: Create any additional repos that are defined
template:
@@ -25,17 +32,15 @@
path: "/etc/yum.repos.d/{{ item | basename }}"
state: absent
with_fileglob:
- - '*/*'
- when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search(".repo$"))
+ - '*/repos/*'
+ when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
- name: Configure gpg keys if needed
copy: src={{ item }} dest=/etc/pki/rpm-gpg/
with_fileglob:
- - "{{ openshift_deployment_type }}/*"
- when: item | basename | match("RPM-GPG-KEY-")
+ - "{{ openshift_deployment_type }}/gpg_keys/*"
- name: Configure yum repositories
copy: src={{ item }} dest=/etc/yum.repos.d/
with_fileglob:
- - "{{ openshift_deployment_type }}/*"
- when: item | basename | search(".*\.repo$")
+ - "{{ openshift_deployment_type }}/repos/*"
diff --git a/roles/repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 7ea2c7460..2d9243545 100644
--- a/roles/repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
@@ -1,4 +1,3 @@
-# {{ ansible_managed }}
{% for repo in openshift_additional_repos %}
[{{ repo.id }}]
name={{ repo.name | default(repo.id) }}
diff --git a/roles/repos/vars/main.yml b/roles/openshift_repos/vars/main.yml
index bbb4c77e7..bbb4c77e7 100644
--- a/roles/repos/vars/main.yml
+++ b/roles/openshift_repos/vars/main.yml
diff --git a/roles/openshift_sdn_master/defaults/main.yml b/roles/openshift_sdn_master/defaults/main.yml
deleted file mode 100644
index da7655546..000000000
--- a/roles/openshift_sdn_master/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_sdn_master_debug_level: "{{ openshift_debug_level | default(0) }}"
diff --git a/roles/openshift_sdn_master/meta/main.yml b/roles/openshift_sdn_master/meta/main.yml
index e6e5514d1..5de32cc13 100644
--- a/roles/openshift_sdn_master/meta/main.yml
+++ b/roles/openshift_sdn_master/meta/main.yml
@@ -11,4 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- { role: openshift_common }
diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml
index e1761afdc..77e7a80ba 100644
--- a/roles/openshift_sdn_master/tasks/main.yml
+++ b/roles/openshift_sdn_master/tasks/main.yml
@@ -1,24 +1,35 @@
---
+# TODO: add task to set the sdn subnet if openshift-sdn-master hasn't been
+# started yet
+
+- name: Set master sdn OpenShift facts
+ openshift_facts:
+ role: 'master_sdn'
+ local_facts:
+ debug_level: "{{ openshift_master_sdn_debug_level | default(openshift.common.debug_level) }}"
+
- name: Install openshift-sdn-master
yum:
pkg: openshift-sdn-master
state: installed
+ register: install_result
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+# TODO: we should probably generate certs specifically for sdn
- name: Configure openshift-sdn-master settings
lineinfile:
dest: /etc/sysconfig/openshift-sdn-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"-v={{ openshift_sdn_master_debug_level }}\""
+ line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }} -etcd-endpoints={{ openshift_sdn_master_url}}
+ -etcd-cafile={{ openshift_cert_dir }}/ca/ca.crt
+ -etcd-certfile={{ openshift_cert_dir }}/openshift-client/cert.crt
+ -etcd-keyfile={{ openshift_cert_dir }}/openshift-client/key.key\""
notify:
- restart openshift-sdn-master
-- name: Set openshift-sdn-master facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: sdn-master
- option: debug_level
- value: "{{ openshift_sdn_master_debug_level }}"
-
- name: Enable openshift-sdn-master
service:
name: openshift-sdn-master
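One detail in the Configure openshift-sdn-master settings task: the line: value is a multi-line double-quoted YAML scalar, so the line breaks fold into single spaces and OPTIONS is written out as one line. A quick check with PyYAML (assumed available; the endpoint value is made up for the demo):

```python
import yaml

doc = '''
line: "OPTIONS=\\"-v=0 -etcd-endpoints=https://master.example.com:4001
 -etcd-cafile=/var/lib/openshift/openshift.local.certificates/ca/ca.crt
 -etcd-certfile=/var/lib/openshift/openshift.local.certificates/openshift-client/cert.crt\\""
'''
# The folded scalar loads as a single line with the breaks collapsed to spaces.
print(yaml.safe_load(doc)['line'])
```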
diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md
index 294550219..e6b6a9503 100644
--- a/roles/openshift_sdn_node/README.md
+++ b/roles/openshift_sdn_node/README.md
@@ -17,19 +17,12 @@ From this role:
| openshift_sdn_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-sdn-node |
-From openshift_node:
-| Name | Default value | |
-|-----------------------|------------------|--------------------------------------|
-| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
-
-
From openshift_common:
| Name | Default value | |
|-------------------------------|---------------------|----------------------------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_sdn_node/defaults/main.yml b/roles/openshift_sdn_node/defaults/main.yml
deleted file mode 100644
index 9612d9d91..000000000
--- a/roles/openshift_sdn_node/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_sdn_node_debug_level: "{{ openshift_debug_level | default(0) }}"
diff --git a/roles/openshift_sdn_node/meta/main.yml b/roles/openshift_sdn_node/meta/main.yml
index ab45ff51e..ffe10f836 100644
--- a/roles/openshift_sdn_node/meta/main.yml
+++ b/roles/openshift_sdn_node/meta/main.yml
@@ -11,4 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- { role: openshift_common }
diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml
index ff05a6972..37a30d019 100644
--- a/roles/openshift_sdn_node/tasks/main.yml
+++ b/roles/openshift_sdn_node/tasks/main.yml
@@ -1,11 +1,23 @@
---
+- name: Set node sdn OpenShift facts
+ openshift_facts:
+ role: 'node_sdn'
+ local_facts:
+ debug_level: "{{ openshift_node_sdn_debug_level | default(openshift.common.debug_level) }}"
+
- name: Install openshift-sdn-node
yum:
pkg: openshift-sdn-node
state: installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
# TODO: we are specifying -hostname= for OPTIONS as a workaround for
# openshift-sdn-node not properly detecting the hostname.
+# TODO: we should probably generate certs specifically for sdn
- name: Configure openshift-sdn-node settings
lineinfile:
dest: /etc/sysconfig/openshift-sdn-node
@@ -14,28 +26,35 @@
backrefs: yes
with_items:
- regex: '^(OPTIONS=)'
- line: '\1"-v={{ openshift_sdn_node_debug_level }} -hostname={{ openshift_hostname }}"'
+ line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}
+ -etcd-cafile={{ openshift_node_cert_dir }}/ca.crt
+ -etcd-certfile={{ openshift_node_cert_dir }}/client.crt
+ -etcd-keyfile={{ openshift_node_cert_dir }}/client.key"'
- regex: '^(MASTER_URL=)'
- line: '\1"http://{{ openshift_master_ips | first }}:4001"'
+ line: '\1"{{ openshift_sdn_master_url }}"'
- regex: '^(MINION_IP=)'
- line: '\1"{{ openshift_public_ip }}"'
- # TODO lock down the insecure-registry config to a more sane value than
- # 0.0.0.0/0
- - regex: '^(DOCKER_OPTIONS=)'
- line: '\1"--insecure-registry=0.0.0.0/0 -b=lbr0 --mtu=1450 --selinux-enabled"'
+ line: '\1"{{ openshift.common.ip }}"'
+ notify: restart openshift-sdn-node
+
+- name: Ensure we aren't setting DOCKER_OPTIONS in /etc/sysconfig/openshift-sdn-node
+ lineinfile:
+ dest: /etc/sysconfig/openshift-sdn-node
+ regexp: '^DOCKER_OPTIONS='
+ state: absent
+ notify: restart openshift-sdn-node
+
+# TODO lock down the insecure-registry config to a more sane value than
+# 0.0.0.0/0
+- name: Configure docker insecure-registry setting
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: INSECURE_REGISTRY=
+ line: INSECURE_REGISTRY='--insecure-registry=0.0.0.0/0'
notify: restart openshift-sdn-node
-- name: Set openshift-sdn-node facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: sdn-node
- option: debug_level
- value: "{{ openshift_sdn_node_debug_level }}"
-# fixme: Once the openshift_cluster playbook is published state should be started
-# Always bounce service to pick up new credentials
- name: Start and enable openshift-sdn-node
service:
name: openshift-sdn-node
enabled: yes
- state: restarted
+ state: started
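The notify entries above target a handler defined elsewhere in this role; a minimal sketch of such a handlers/main.yml (assuming the conventional role layout):

    ---
    - name: restart openshift-sdn-node
      service:
        name: openshift-sdn-node
        state: restarted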
diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml
new file mode 100644
index 000000000..208065df2
--- /dev/null
+++ b/roles/os_env_extras_node/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+# The origin rpm ships instructions on how to set up origin
+# properly. The following steps come from there.
+- name: Change root to be in the Docker group
+ user: name=root groups=dockerroot append=yes
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index fef710055..1cb539a8c 100644..100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -1,6 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-
+# vim: expandtab:tabstop=4:shiftwidth=4
+# pylint: disable=fixme, missing-docstring
import subprocess
from subprocess import call, check_output
DOCUMENTATION = '''
@@ -16,6 +17,7 @@ EXAMPLES = '''
class IpTablesError(Exception):
def __init__(self, msg, cmd, exit_code, output):
+ super(IpTablesError, self).__init__(msg)
self.msg = msg
self.cmd = cmd
self.exit_code = exit_code
@@ -35,13 +37,14 @@ class IpTablesSaveError(IpTablesError):
class IpTablesCreateChainError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output):
- super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code, output)
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
+ super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
+ output)
self.chain = chain
class IpTablesCreateJumpRuleError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output):
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
@@ -50,12 +53,14 @@ class IpTablesCreateJumpRuleError(IpTablesError):
# TODO: implement rollbacks for any events that were successful and an
# exception was thrown later. For example, when the chain is created
# successfully, but the add/remove rule fails.
-class IpTablesManager:
- def __init__(self, module, ip_version, check_mode, chain):
+class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
+ def __init__(self, module):
self.module = module
- self.ip_version = ip_version
- self.check_mode = check_mode
- self.chain = chain
+ self.ip_version = module.params['ip_version']
+ self.check_mode = module.check_mode
+ self.chain = module.params['chain']
+ self.create_jump_rule = module.params['create_jump_rule']
+ self.jump_rule_chain = module.params['jump_rule_chain']
self.cmd = self.gen_cmd()
self.save_cmd = self.gen_save_cmd()
self.output = []
@@ -65,18 +70,21 @@ class IpTablesManager:
try:
self.output.append(check_output(self.save_cmd,
stderr=subprocess.STDOUT))
- except subprocess.CalledProcessError as e:
+ except subprocess.CalledProcessError as ex:
raise IpTablesSaveError(
msg="Failed to save iptables rules",
- cmd=e.cmd, exit_code=e.returncode, output=e.output)
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
+
+ def verify_chain(self):
+ if not self.chain_exists():
+ self.create_chain()
+ if self.create_jump_rule and not self.jump_rule_exists():
+ self.create_jump()
def add_rule(self, port, proto):
rule = self.gen_rule(port, proto)
if not self.rule_exists(rule):
- if not self.chain_exists():
- self.create_chain()
- if not self.jump_rule_exists():
- self.create_jump_rule()
+ self.verify_chain()
if self.check_mode:
self.changed = True
@@ -87,13 +95,13 @@ class IpTablesManager:
self.output.append(check_output(cmd))
self.changed = True
self.save()
- except subprocess.CalledProcessError as e:
+ except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create rule for "
- "%s %s" % (self.proto, self.port),
- cmd=e.cmd, exit_code=e.returncode,
- output=e.output)
+ "%s %s" % (proto, port),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
def remove_rule(self, port, proto):
rule = self.gen_rule(port, proto)
@@ -107,31 +115,31 @@ class IpTablesManager:
self.output.append(check_output(cmd))
self.changed = True
self.save()
- except subprocess.CalledProcessError as e:
- raise IpTablesRemoveChainError(
+ except subprocess.CalledProcessError as ex:
+ raise IpTablesRemoveRuleError(
chain=self.chain,
msg="Failed to remove rule for %s %s" % (proto, port),
- cmd=e.cmd, exit_code=e.returncode, output=e.output)
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def rule_exists(self, rule):
check_cmd = self.cmd + ['-C'] + rule
- return True if subprocess.call(check_cmd) == 0 else False
+ return True if call(check_cmd) == 0 else False
def gen_rule(self, port, proto):
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
'-m', proto, '--dport', str(port), '-j', 'ACCEPT']
- def create_jump_rule(self):
+ def create_jump(self):
if self.check_mode:
self.changed = True
self.output.append("Create jump rule for chain %s" % self.chain)
else:
try:
- cmd = self.cmd + ['-L', 'INPUT', '--line-numbers']
+ cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
output = check_output(cmd, stderr=subprocess.STDOUT)
# break the input rules into rows and columns
- input_rules = map(lambda s: s.split(), output.split('\n'))
+ input_rules = [s.split() for s in output.split('\n')]
# Find the last numbered rule
last_rule_num = None
@@ -144,41 +152,38 @@ class IpTablesManager:
continue
last_rule_target = rule[1]
- # Raise an exception if we do not find a valid INPUT rule
- if not last_rule_num or not last_rule_target:
- raise IpTablesCreateJumpRuleError(
- chain=self.chain,
- msg="Failed to find existing INPUT rules",
- cmd=None, exit_code=None, output=None)
-
# Naively assume that if the last row is a REJECT rule, then
# we can add insert our rule right before it, otherwise we
# assume that we can just append the rule.
- if last_rule_target == 'REJECT':
+ if (last_rule_num and last_rule_target
+ and last_rule_target == 'REJECT'):
# insert rule
- cmd = self.cmd + ['-I', 'INPUT', str(last_rule_num)]
+ cmd = self.cmd + ['-I', self.jump_rule_chain,
+ str(last_rule_num)]
else:
# append rule
- cmd = self.cmd + ['-A', 'INPUT']
+ cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
output = check_output(cmd, stderr=subprocess.STDOUT)
- changed = True
+ self.changed = True
self.output.append(output)
- except subprocess.CalledProcessError as e:
- if '--line-numbers' in e.cmd:
+ self.save()
+ except subprocess.CalledProcessError as ex:
+ if '--line-numbers' in ex.cmd:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to query existing INPUT rules to "
- "determine jump rule location",
- cmd=e.cmd, exit_code=e.returncode,
- output=e.output)
+ msg=("Failed to query existing " +
+ self.jump_rule_chain +
+ " rules to determine jump rule location"),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
else:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to create jump rule for chain %s" %
- self.chain,
- cmd=e.cmd, exit_code=e.returncode,
- output=e.output)
+ msg=("Failed to create jump rule for chain " +
+ self.chain),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
def create_chain(self):
if self.check_mode:
@@ -192,27 +197,27 @@ class IpTablesManager:
self.changed = True
self.output.append("Successfully created chain %s" %
self.chain)
- except subprocess.CalledProcessError as e:
+ self.save()
+ except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create chain: %s" % self.chain,
- cmd=e.cmd, exit_code=e.returncode, output=e.output
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
)
def jump_rule_exists(self):
- cmd = self.cmd + ['-C', 'INPUT', '-j', self.chain]
- return True if subprocess.call(cmd) == 0 else False
+ cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
+ return True if call(cmd) == 0 else False
def chain_exists(self):
cmd = self.cmd + ['-L', self.chain]
- return True if subprocess.call(cmd) == 0 else False
+ return True if call(cmd) == 0 else False
def gen_cmd(self):
cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
return ["/usr/sbin/%s" % cmd]
- def gen_save_cmd(self):
- cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
+ def gen_save_cmd(self): # pylint: disable=no-self-use
return ['/usr/libexec/iptables/iptables.init', 'save']
@@ -220,9 +225,13 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
- action=dict(required=True, choices=['add', 'remove']),
- protocol=dict(required=True, choices=['tcp', 'udp']),
- port=dict(required=True, type='int'),
+ action=dict(required=True, choices=['add', 'remove',
+ 'verify_chain']),
+ chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
+ create_jump_rule=dict(required=False, type='bool', default=True),
+ jump_rule_chain=dict(required=False, default='INPUT'),
+ protocol=dict(required=False, choices=['tcp', 'udp']),
+ port=dict(required=False, type='int'),
ip_version=dict(required=False, default='ipv4',
choices=['ipv4', 'ipv6']),
),
@@ -232,23 +241,33 @@ def main():
action = module.params['action']
protocol = module.params['protocol']
port = module.params['port']
- ip_version = module.params['ip_version']
- chain = 'OS_FIREWALL_ALLOW'
- iptables_manager = IpTablesManager(module, ip_version, module.check_mode, chain)
+ if action in ['add', 'remove']:
+ if not protocol:
+ error = "protocol is required when action is %s" % action
+ module.fail_json(msg=error)
+ if not port:
+ error = "port is required when action is %s" % action
+ module.fail_json(msg=error)
+
+ iptables_manager = IpTablesManager(module)
try:
if action == 'add':
iptables_manager.add_rule(port, protocol)
elif action == 'remove':
iptables_manager.remove_rule(port, protocol)
- except IpTablesError as e:
- module.fail_json(msg=e.msg)
+ elif action == 'verify_chain':
+ iptables_manager.verify_chain()
+ except IpTablesError as ex:
+ module.fail_json(msg=ex.msg)
return module.exit_json(changed=iptables_manager.changed,
output=iptables_manager.output)
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
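Given the expanded argument_spec above, a task exercising the new verify_chain action could look like this sketch (task name and values illustrative):

    - name: Ensure firewall chain exists with a jump rule from INPUT
      os_firewall_manage_iptables:
        name: verify-allow-chain
        action: verify_chain
        chain: OS_FIREWALL_ALLOW
        jump_rule_chain: INPUT
        create_jump_rule: yes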
diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml
index 7a8cef6c5..8592371e8 100644
--- a/roles/os_firewall/meta/main.yml
+++ b/roles/os_firewall/meta/main.yml
@@ -1,3 +1,4 @@
+---
galaxy_info:
author: Jason DeTiberus
description: os_firewall
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 469cfab6f..5089eb3e0 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -3,6 +3,7 @@
yum:
name: firewalld
state: present
+ register: install_result
- name: Check if iptables-services is installed
command: rpm -q iptables-services
@@ -20,6 +21,10 @@
- ip6tables
when: pkg_check.rc == 0
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
- name: Start and enable firewalld service
service:
name: firewalld
@@ -39,6 +44,7 @@
- iptables
- ip6tables
when: pkg_check.rc == 0
+ ignore_errors: yes
# TODO: Ansible 1.9 will eliminate the need for separate firewalld tasks for
# enabling rules and making them permanent with the immediate flag
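Once that lands, the per-rule handling could presumably collapse into one firewalld task per rule (assumed module syntax; port value illustrative):

    - name: Allow a service port through firewalld
      firewalld:
        port: 8443/tcp
        permanent: true
        immediate: true
        state: enabled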
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 87e77c083..9af9d8d29 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -6,6 +6,7 @@
with_items:
- iptables
- iptables-services
+ register: install_result
- name: Check if firewalld is installed
command: rpm -q firewalld
@@ -20,14 +21,15 @@
enabled: no
when: pkg_check.rc == 0
-- name: Start and enable iptables services
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+- name: Start and enable iptables service
service:
- name: "{{ item }}"
+ name: iptables
state: started
enabled: yes
- with_items:
- - iptables
- - ip6tables
register: result
- name: Pause, as starting the iptables service can sometimes cause ssh to fail
@@ -40,6 +42,7 @@
register: result
changed_when: "'firewalld' in result.stdout"
when: pkg_check.rc == 0
+ ignore_errors: yes
- name: Add iptables allow rules
os_firewall_manage_iptables:
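For reference, a completed invocation of this module's add action takes roughly this shape (service name, port, and protocol illustrative):

    - name: Add iptables allow rules
      os_firewall_manage_iptables:
        name: openshift-api
        action: add
        protocol: tcp
        port: 8443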
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
new file mode 100644
index 000000000..4a2c3d47a
--- /dev/null
+++ b/roles/os_update_latest/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Update all packages
+ yum: name=* state=latest
diff --git a/roles/os_zabbix/library/zbxapi.py b/roles/os_zabbix/library/zbxapi.py
new file mode 100755
index 000000000..f4f52909b
--- /dev/null
+++ b/roles/os_zabbix/library/zbxapi.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Purpose: An ansible module to communicate with zabbix.
+#
+
+import json
+import httplib2
+import sys
+import os
+
+class ZabbixAPI(object):
+ '''
+ ZabbixAPI class
+ '''
+ classes = {
+ 'Action': ['create', 'delete', 'get', 'update'],
+ 'Alert': ['get'],
+ 'Application': ['create', 'delete', 'get', 'massadd', 'update'],
+ 'Configuration': ['export', 'import'],
+ 'Dcheck': ['get'],
+ 'Dhost': ['get'],
+ 'Drule': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Dservice': ['get'],
+ 'Event': ['acknowledge', 'get'],
+ 'Graph': ['create', 'delete', 'get', 'update'],
+ 'Graphitem': ['get'],
+ 'Graphprototype': ['create', 'delete', 'get', 'update'],
+ 'History': ['get'],
+ 'Hostgroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
+ 'Hostinterface': ['create', 'delete', 'get', 'massadd', 'massremove', 'replacehostinterfaces', 'update'],
+ 'Host': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
+ 'Hostprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Httptest': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Iconmap': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Image': ['create', 'delete', 'get', 'update'],
+ 'Item': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Itemprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Maintenance': ['create', 'delete', 'get', 'update'],
+ 'Map': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Mediatype': ['create', 'delete', 'get', 'update'],
+ 'Proxy': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Screen': ['create', 'delete', 'get', 'update'],
+ 'Screenitem': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update', 'updatebyposition'],
+ 'Script': ['create', 'delete', 'execute', 'get', 'getscriptsbyhosts', 'update'],
+ 'Service': ['adddependencies', 'addtimes', 'create', 'delete', 'deletedependencies', 'deletetimes', 'get', 'getsla', 'isreadable', 'iswritable', 'update'],
+ 'Template': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
+ 'Templatescreen': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Templatescreenitem': ['get'],
+ 'Trigger': ['adddependencies', 'create', 'delete', 'deletedependencies', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Triggerprototype': ['create', 'delete', 'get', 'update'],
+ 'User': ['addmedia', 'create', 'delete', 'deletemedia', 'get', 'isreadable', 'iswritable', 'login', 'logout', 'update', 'updatemedia', 'updateprofile'],
+ 'Usergroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massupdate', 'update'],
+ 'Usermacro': ['create', 'createglobal', 'delete', 'deleteglobal', 'get', 'update', 'updateglobal'],
+ 'Usermedia': ['get'],
+ }
+
+    def __init__(self, data=None):
+        data = data or {}
+        self.server = data.get('server')
+        self.username = data.get('user')
+        self.password = data.get('password')
+        if any(map(lambda value: value is None, [self.server, self.username, self.password])):
+            print 'Please specify zabbix server url, username, and password.'
+            sys.exit(1)
+
+ self.verbose = data.has_key('verbose')
+ self.use_ssl = data.has_key('use_ssl')
+ self.auth = None
+
+ for class_name, method_names in self.classes.items():
+ setattr(self, class_name.lower(), getattr(self, class_name)(self))
+
+ results = self.user.login(user=self.username, password=self.password)
+
+ if results[0]['status'] == '200':
+ if results[1].has_key('result'):
+ self.auth = results[1]['result']
+ elif results[1].has_key('error'):
+ print "Unable to authenticate with zabbix server. {0} ".format(results[1]['error'])
+ sys.exit(1)
+ else:
+ print "Error in call to zabbix. Http status: {0}.".format(results[0]['status'])
+ sys.exit(1)
+
+ def perform(self, method, params):
+ '''
+ This method calls your zabbix server.
+
+ It requires the following parameters in order for a proper request to be processed:
+
+ jsonrpc - the version of the JSON-RPC protocol used by the API; the Zabbix API implements JSON-RPC version 2.0;
+ method - the API method being called;
+ params - parameters that will be passed to the API method;
+ id - an arbitrary identifier of the request;
+ auth - a user authentication token; since we don't have one yet, it's set to null.
+ '''
+ http_method = "POST"
+ if params.has_key("http_method"):
+ http_method = params['http_method']
+
+ jsonrpc = "2.0"
+ if params.has_key('jsonrpc'):
+ jsonrpc = params['jsonrpc']
+
+ rid = 1
+ if params.has_key('id'):
+ rid = params['id']
+
+ http = None
+ if self.use_ssl:
+ http = httplib2.Http()
+ else:
+            http = httplib2.Http(disable_ssl_certificate_validation=True)
+
+ headers = params.get('headers', {})
+ headers["Content-type"] = "application/json"
+
+ body = {
+ "jsonrpc": jsonrpc,
+ "method": method,
+ "params": params,
+ "id": rid,
+ 'auth': self.auth,
+ }
+
+        if method in ['user.login', 'api.version']:
+ del body['auth']
+
+ body = json.dumps(body)
+
+ if self.verbose:
+ print body
+ print method
+ print headers
+ httplib2.debuglevel = 1
+
+ response, results = http.request(self.server, http_method, body, headers)
+
+ if self.verbose:
+ print response
+ print results
+
+ try:
+ results = json.loads(results)
+ except ValueError as e:
+ results = {"error": e.message}
+
+ return response, results
+
+ '''
+ This bit of metaprogramming is where the ZabbixAPI subclasses are created.
+ For each of ZabbixAPI.classes we create a class from the key and methods
+    from the ZabbixAPI.classes values. We pass a reference to the ZabbixAPI
+    class to each subclass so that each one can call the perform method.
+ '''
+ @staticmethod
+ def meta(class_name, method_names):
+ # This meta method allows a class to add methods to it.
+ def meta_method(Class, method_name):
+ # This template method is a stub method for each of the subclass
+ # methods.
+ def template_method(self, **params):
+ return self.parent.perform(class_name.lower()+"."+method_name, params)
+ template_method.__doc__ = "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % (class_name.lower(), method_name)
+ template_method.__name__ = method_name
+ # this is where the template method is placed inside of the subclass
+ # e.g. setattr(User, "create", stub_method)
+ setattr(Class, template_method.__name__, template_method)
+
+ # This class call instantiates a subclass. e.g. User
+        Class = type(class_name, (object,), {'__doc__': "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % class_name.lower()})
+ # This init method gets placed inside of the Class
+ # to allow it to be instantiated. A reference to the parent class(ZabbixAPI)
+ # is passed in to allow each class access to the perform method.
+ def __init__(self, parent):
+ self.parent = parent
+ # This attaches the init to the subclass. e.g. Create
+ setattr(Class, __init__.__name__, __init__)
+ # For each of our ZabbixAPI.classes dict values
+ # Create a method and attach it to our subclass.
+ # e.g. 'User': ['delete', 'get', 'updatemedia', 'updateprofile',
+ # 'update', 'iswritable', 'logout', 'addmedia', 'create',
+ # 'login', 'deletemedia', 'isreadable'],
+ # User.delete
+ # User.get
+ for method_name in method_names:
+ meta_method(Class, method_name)
+ # Return our subclass with all methods attached
+ return Class
+
+# Attach all ZabbixAPI.classes to ZabbixAPI class through metaprogramming
+for class_name, method_names in ZabbixAPI.classes.items():
+ setattr(ZabbixAPI, class_name, ZabbixAPI.meta(class_name, method_names))
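+# e.g. ZabbixAPI.Host is now a generated class whose methods (get, create,
+# delete, ...) each dispatch "host.<method>" through ZabbixAPI.perform.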
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ zbx_class=dict(choices=ZabbixAPI.classes.keys()),
+ action=dict(default=None, type='str'),
+ params=dict(),
+ debug=dict(default=False, type='bool'),
+ ),
+ #supports_check_mode=True
+ )
+
+    user = module.params.get('user', None)
+    if not user:
+        user = os.environ.get('ZABBIX_USER')
+
+    pw = module.params.get('password', None)
+    if not pw:
+        pw = os.environ.get('ZABBIX_PASSWORD')
+
+ server = module.params['server']
+
+    api_data = {
+        'user': user,
+        'password': pw,
+        'server': server,
+    }
+
+    if module.params['debug']:
+        # ZabbixAPI keys verbose logging off the presence of 'verbose'
+        api_data['verbose'] = True
+
+ if not user or not pw or not server:
+        module.fail_json(msg='Please specify the user, password, and the zabbix server.')
+
+ zapi = ZabbixAPI(api_data)
+
+ zbx_class = module.params.get('zbx_class')
+ action = module.params.get('action')
+ params = module.params.get('params', {})
+
+
+ # Get the instance we are trying to call
+ zbx_class_inst = zapi.__getattribute__(zbx_class.lower())
+ # Get the instance's method we are trying to call
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__[action]
+ # Make the call with the incoming params
+ results = zbx_action_method(zbx_class_inst, **params)
+
+ # Results Section
+    status = results[0]['status']
+    if status not in ['200', '201']:
+ module.fail_json(msg="Http response: [%s] - Error: %s" % (str(results[0]), results[1]))
+
+    module.exit_json(results=results[1]['result'])
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
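A playbook task driving this module could look like the following sketch (server URL and credentials illustrative; ZABBIX_USER / ZABBIX_PASSWORD may also come from the environment as handled above):

    - name: Fetch all zabbix hosts
      zbxapi:
        server: https://zabbix.example.com/zabbix/api_jsonrpc.php
        user: admin
        password: changeme
        zbx_class: Host
        action: get
        params:
          output: extend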
diff --git a/roles/repos/defaults/main.yaml b/roles/repos/defaults/main.yaml
deleted file mode 100644
index 6fe2bf621..000000000
--- a/roles/repos/defaults/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# TODO: once we are able to configure/deploy origin using the openshift roles,
-# then we should default to origin
-openshift_deployment_type: online
-openshift_additional_repos: {}
diff --git a/roles/repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/repos/files/online/RPM-GPG-KEY-redhat-beta
deleted file mode 100644
index 7b40671a4..000000000
--- a/roles/repos/files/online/RPM-GPG-KEY-redhat-beta
+++ /dev/null
@@ -1,61 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.2.6 (GNU/Linux)
-
-mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT
-kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A
-BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo
-gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P
-xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D
-FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7
-Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i
-QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm
-G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt
-0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR
-fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB
-tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv
-bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT
-ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy
-6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ
-OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6
-0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc
-MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u
-QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE
-Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6
-DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0
-B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH
-V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT
-CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ==
-=21pb
------END PGP PUBLIC KEY BLOCK-----
-The following public key can be used to verify RPM packages built and
-signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG
-package. Questions about this key should be sent to security@redhat.com.
-
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.0.6 (GNU/Linux)
-Comment: For info see http://www.gnupg.org
-
-mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp
-Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd
-LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi
-UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe
-II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW
-QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz
-+AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1
-VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI
-mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg
-SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX
-BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4
-F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF
-AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q
-0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc
-RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI
-JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR
-xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU
-ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5
-WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI
-RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL
-yACfb68fBd2pWEzLKsOk9imIobHHpzE=
-=gpIn
------END PGP PUBLIC KEY BLOCK-----
diff --git a/roles/repos/files/online/RPM-GPG-KEY-redhat-release b/roles/repos/files/online/RPM-GPG-KEY-redhat-release
deleted file mode 100644
index 0f83b622d..000000000
--- a/roles/repos/files/online/RPM-GPG-KEY-redhat-release
+++ /dev/null
@@ -1,63 +0,0 @@
-The following public key can be used to verify RPM packages built and
-signed by Red Hat, Inc. This key is used for packages in Red Hat
-products shipped after November 2009, and for all updates to those
-products.
-
-Questions about this key should be sent to security@redhat.com.
-
-pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. (release key 2) <security@redhat.com>
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.2.6 (GNU/Linux)
-
-mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF
-0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF
-0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c
-u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh
-XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H
-5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW
-9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj
-/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1
-PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY
-HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF
-buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB
-tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0
-LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK
-CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC
-2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf
-C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5
-un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E
-0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE
-IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh
-8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL
-Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki
-JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25
-OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq
-dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==
-=zbHE
------END PGP PUBLIC KEY BLOCK-----
-The following public key can be used to verify RPM packages built and
-signed by Red Hat, Inc. This key is a supporting (auxiliary) key for
-Red Hat products shipped after November 2006 and for all updates to
-those products.
-
-Questions about this key should be sent to security@redhat.com.
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.2.6 (GNU/Linux)
-
-mQGiBEVwDGkRBACwPhZIpvkjI8wV9sFTDoqyPLx1ub8Sd/w+YuI5Ovm49mvvEQVT
-VLg8FgE5JlST59AbsLDyVtRa9CxIvN5syBVrWWWtHtDnnylFBcqG/A6J3bI4E9/A
-UtSL5Zxbav0+utP6f3wOpxQrxc+WIDVgpurdBKAQ3dsobGBqypeX6FXZ5wCgou6C
-yZpGIBqosJaDWLzNeOfb/70D/1thLkQyhW3JJ6cHCYJHNfBShvbLWBf6S231mgmu
-MyMlt8Kmipc9bw+saaAkSkVsQ/ZbfjrWB7e5kbMruKLVrH+nGhamlHYUGyAPtsPg
-Uj/NUSj5BmrCsOkMpn43ngTLssE9MLhSPj2nIHGFv9B+iVLvomDdwnaBRgQ1aK8z
-z6MAA/406yf5yVJ/MlTWs1/68VwDhosc9BtU1V5IE0NXgZUAfBJzzfVzzKQq6zJ2
-eZsMLhr96wbsW13zUZt1ing+ulwh2ee4meuJq6h/971JspFY/XBhcfq4qCNqVjsq
-SZnWoGdCO6J8CxPIemD2IUHzjoyyeEj3RVydup6pcWZAmhzkKrQzUmVkIEhhdCwg
-SW5jLiAoYXV4aWxpYXJ5IGtleSkgPHNlY3VyaXR5QHJlZGhhdC5jb20+iF4EExEC
-AB4FAkVwDGkCGwMGCwkIBwMCAxUCAwMWAgECHgECF4AACgkQRWiciC+mWOC1rQCg
-ooNLCFOzNPcvhd9Za8C801HmnsYAniCw3yzrCqtjYnxDDxlufH0FVTwX
-=d/bm
------END PGP PUBLIC KEY BLOCK-----
-
diff --git a/roles/repos/files/online/epel7-kubernetes.repo b/roles/repos/files/online/epel7-kubernetes.repo
deleted file mode 100644
index 1deae2939..000000000
--- a/roles/repos/files/online/epel7-kubernetes.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[maxamillion-epel7-kubernetes]
-name=Copr repo for epel7-kubernetes owned by maxamillion
-baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/epel7-kubernetes/epel-7-$basearch/
-skip_if_unavailable=True
-gpgcheck=0
-enabled=1
diff --git a/roles/repos/files/online/epel7-openshift.repo b/roles/repos/files/online/epel7-openshift.repo
deleted file mode 100644
index c7629872d..000000000
--- a/roles/repos/files/online/epel7-openshift.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[maxamillion-origin-next]
-name=Copr repo for origin-next owned by maxamillion
-baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
-skip_if_unavailable=False
-gpgcheck=0
-enabled=1
diff --git a/roles/repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/repos/files/online/oso-rhui-rhel-7-extras.repo
deleted file mode 100644
index cfe41f691..000000000
--- a/roles/repos/files/online/oso-rhui-rhel-7-extras.repo
+++ /dev/null
@@ -1,23 +0,0 @@
-[oso-rhui-rhel-server-extras]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
-failovermethod=priority
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
-
-[oso-rhui-rhel-server-extras-htb]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/
-enabled=0
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
-failovermethod=priority
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/repos/files/online/oso-rhui-rhel-7-server.repo b/roles/repos/files/online/oso-rhui-rhel-7-server.repo
deleted file mode 100644
index ddc93193d..000000000
--- a/roles/repos/files/online/oso-rhui-rhel-7-server.repo
+++ /dev/null
@@ -1,21 +0,0 @@
-[oso-rhui-rhel-server-releases]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux 7
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
-
-[oso-rhui-rhel-server-releases-optional]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md
new file mode 100644
index 000000000..51ecd5d34
--- /dev/null
+++ b/roles/yum_repos/README.md
@@ -0,0 +1,113 @@
+Yum Repos
+=========
+
+This role allows easy deployment of yum repository config files.
+
+Requirements
+------------
+
+Yum
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|-------------------|---------------|--------------------------------------------|
+| repo_files        | None          | Repo file definitions (see examples below) |
+| repo_enabled | 1 | Should repos be enabled by default |
+| repo_gpgcheck | 1 | Should repo gpgcheck be enabled by default |
+
+Dependencies
+------------
+
+Example Playbook
+----------------
+
+A single repo file containing a single repo:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_repo
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ skip_if_unavailable: yes
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+
+A single repo file containing a single repo, disabling gpgcheck:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_other_repo
+ repos:
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ gpgcheck: no
+
+A single repo file containing a single disabled repo:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_other_repo
+ repos:
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ enabled: no
+
+A single repo file containing multiple repos:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+        - id: my_repos
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ gpgkey: https://my.other.awesome.repo/pubkey.gpg
+
+Multiple repo files containing multiple repos:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_repos
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ gpgkey: https://my.other.awesome.repo/pubkey.gpg
+ - id: joes_repos
+ repos:
+ - id: joes_repo
+ name: Joe's Less Awesome Repo
+ baseurl: https://joes.repo/is/here
+ gpgkey: https://joes.repo/pubkey.gpg
+ - id: joes_otherrepo
+ name: Joe's Other Less Awesome Repo
+ baseurl: https://joes.repo/is/there
+ gpgkey: https://joes.repo/pubkey.gpg
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+openshift online operations
diff --git a/roles/yum_repos/defaults/main.yml b/roles/yum_repos/defaults/main.yml
new file mode 100644
index 000000000..515fb7a4a
--- /dev/null
+++ b/roles/yum_repos/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+repo_enabled: 1
+repo_gpgcheck: 1
diff --git a/roles/yum_repos/meta/main.yml b/roles/yum_repos/meta/main.yml
new file mode 100644
index 000000000..6b8374da9
--- /dev/null
+++ b/roles/yum_repos/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: openshift operations
+ description:
+ company: Red Hat, Inc.
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/yum_repos/tasks/main.yml b/roles/yum_repos/tasks/main.yml
new file mode 100644
index 000000000..a9903c6c6
--- /dev/null
+++ b/roles/yum_repos/tasks/main.yml
@@ -0,0 +1,47 @@
+---
+# Convert old params to new params
+- set_fact:
+ repo_files:
+ - id: "{{ repo_tag }}"
+ repos:
+ - id: "{{ repo_tag }}"
+ name: "{{ repo_name }}"
+ baseurl: "{{ repo_baseurl }}"
+ enabled: "{{ repo_enabled }}"
+ gpgcheck: "{{ repo_gpg_check | default(repo_gpgcheck) }}"
+ sslverify: "{{ repo_sslverify | default(None) }}"
+ sslclientcert: "{{ repo_sslclientcert | default(None) }}"
+ sslclientkey: "{{ repo_sslclientkey | default(None) }}"
+ gpgkey: "{{ repo_gpgkey | default(None) }}"
+ when: repo_files is not defined
+
+- name: Verify repo_files is a list
+ assert:
+ that:
+ - repo_files is iterable and repo_files is not string and repo_files is not mapping
+
+- name: Verify repo_files items have an id and a repos list
+ assert:
+ that:
+ - item is mapping
+ - "'id' in item"
+ - "'repos' in item"
+ - item.repos is iterable and item.repos is not string and item.repos is not mapping
+ with_items: repo_files
+
+- name: Verify that repo_files.repos have the required keys
+ assert:
+ that:
+ - item.1 is mapping
+ - "'id' in item.1"
+ - "'name' in item.1"
+ - "'baseurl' in item.1"
+ with_subelements:
+ - repo_files
+ - repos
+
+- name: Installing yum-repo template
+ template:
+ src: yumrepo.j2
+ dest: /etc/yum.repos.d/{{ item.id }}.repo
+ with_items: repo_files
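The set_fact conversion at the top of this file implies the legacy single-repo parameters were passed roughly like this (values illustrative):

    - role: yum_repos
      repo_tag: my_repo
      repo_name: My Awesome Repo
      repo_baseurl: https://my.awesome.repo/is/available/here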
diff --git a/roles/yum_repos/templates/yumrepo.j2 b/roles/yum_repos/templates/yumrepo.j2
new file mode 100644
index 000000000..0dfdbfe43
--- /dev/null
+++ b/roles/yum_repos/templates/yumrepo.j2
@@ -0,0 +1,18 @@
+{% set repos = item.repos %}
+{% for repo in repos %}
+[{{ repo.id }}]
+name={{ repo.name }}
+baseurl={{ repo.baseurl }}
+{% set repo_enabled_value = repo.enabled | default(repo_enabled) %}
+{% set enable_repo = 1 if (repo_enabled_value | int(0) == 1 or repo_enabled_value | lower in ['true', 'yes']) else 0 %}
+enabled={{ enable_repo }}
+{% set repo_gpgcheck_value = repo.gpgcheck | default(repo_gpgcheck) %}
+{% set enable_gpgcheck = 1 if (repo_gpgcheck_value | int(0) == 1 or repo_gpgcheck_value | lower in ['true', 'yes']) else 0 %}
+gpgcheck={{ enable_gpgcheck }}
+{% for key, value in repo.iteritems() %}
+{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined and value != '' %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+
+{% endfor %}
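Given the first README example, this template would render an /etc/yum.repos.d/my_repo.repo along these lines (extra keys appear in arbitrary order):

    [my_repo]
    name=My Awesome Repo
    baseurl=https://my.awesome.repo/is/available/here
    enabled=1
    gpgcheck=1
    skip_if_unavailable=True
    gpgkey=https://my.awesome.repo/pubkey.gpg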