-rw-r--r--  Dockerfile | 17
-rw-r--r--  README.md | 10
-rw-r--r--  README_AEP.md | 240
-rw-r--r--  README_ANSIBLE_CONTAINER.md | 15
-rw-r--r--  README_AWS.md | 43
-rw-r--r--  README_origin.md | 7
-rw-r--r--  README_vagrant.md | 28
-rw-r--r--  Vagrantfile | 41
-rwxr-xr-x  bin/cluster | 10
l---------  bin/openshift_ansible/aws | 1
-rwxr-xr-x  bin/ossh_bash_completion | 21
-rw-r--r--  bin/ossh_zsh_completion | 13
-rw-r--r--  docs/best_practices_guide.adoc | 2
-rw-r--r--  filter_plugins/oo_filters.py | 11
-rw-r--r--  git/.pylintrc | 6
-rw-r--r--  inventory/aws/hosts/hosts | 2
-rw-r--r--  inventory/byo/hosts.example | 11
-rw-r--r--  inventory/gce/hosts/hosts | 2
-rw-r--r--  inventory/libvirt/hosts/hosts | 2
-rwxr-xr-x  inventory/multi_ec2.py | 2
-rw-r--r--  inventory/openstack/hosts/hosts | 2
-rw-r--r--  playbooks/adhoc/create_pv/create_pv.yaml | 142
-rw-r--r--  playbooks/adhoc/create_pv/pv-template.j2 | 16
-rw-r--r--  playbooks/adhoc/deploy_monitoring_containers/deploy.yml | 58
-rw-r--r--  playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service | 36
-rw-r--r--  playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service | 39
-rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 37
-rw-r--r--  playbooks/adhoc/zabbix_setup/create_app.yml | 34
-rw-r--r--  playbooks/adhoc/zabbix_setup/create_application.yml | 18
-rw-r--r--  playbooks/adhoc/zabbix_setup/create_template.yml | 30
l---------  playbooks/adhoc/zabbix_setup/roles | 1
-rw-r--r--  playbooks/adhoc/zabbix_setup/setup_zabbix.yml | 9
-rw-r--r--  playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml | 28
-rw-r--r--  playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml | 332
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/library/ec2_ami_find.py | 2
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.int.yml | 8
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.prod.yml | 8
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.stage.yml | 8
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 12
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 12
-rw-r--r--  playbooks/byo/vagrant.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 9
-rw-r--r--  playbooks/common/openshift-node/config.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 103
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 9
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 4
-rw-r--r--  roles/etcd/tasks/main.yml | 2
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 4
-rw-r--r--  roles/fluentd_master/tasks/main.yml | 3
-rw-r--r--  roles/openshift_common/README.md | 6
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json | 2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 3
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 7
-rw-r--r--  roles/openshift_master/tasks/main.yml | 5
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 14
-rw-r--r--  roles/openshift_master/templates/v1_partials/oauthConfig.j2 | 14
-rw-r--r--  roles/openshift_node/README.md | 12
-rw-r--r--  roles/openshift_node/tasks/main.yml | 8
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 3
-rw-r--r--  roles/os_zabbix/library/__init__.py | 0
-rw-r--r--  roles/os_zabbix/library/test.yml | 92
-rw-r--r--  roles/os_zabbix/library/zbx_host.py | 162
-rw-r--r--  roles/os_zabbix/library/zbx_hostgroup.py | 116
-rw-r--r--  roles/os_zabbix/library/zbx_item.py | 160
-rw-r--r--  roles/os_zabbix/library/zbx_mediatype.py | 149
-rw-r--r--  roles/os_zabbix/library/zbx_template.py | 126
-rw-r--r--  roles/os_zabbix/library/zbx_trigger.py | 175
-rw-r--r--  roles/os_zabbix/library/zbx_user.py | 146
-rw-r--r--  roles/os_zabbix/library/zbx_usergroup.py | 160
-rwxr-xr-x  roles/os_zabbix/library/zbxapi.py | 382
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 4
99 files changed, 2268 insertions, 988 deletions
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 000000000..ab16ca609
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,17 @@
+FROM rhel7
+
+MAINTAINER Aaron Weitekamp <aweiteka@redhat.com>
+
+RUN yum -y install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+
+# Not sure if all of these packages are necessary
+# only git and ansible are known requirements
+RUN yum install -y --enablerepo rhel-7-server-extras-rpms net-tools bind-utils git ansible
+
+ADD ./ /opt/openshift-ansible/
+
+ENTRYPOINT ["/usr/bin/ansible-playbook"]
+
+CMD ["/opt/openshift-ansible/playbooks/byo/config.yml"]
+
+LABEL RUN docker run -it --rm --privileged --net=host -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE
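Because the image's ENTRYPOINT is ansible-playbook and the playbook path above is only the default CMD, a different playbook can be passed as the run argument. A rough sketch, with the image tag and mounts mirroring the LABEL above:

```sh
docker build --rm -t ansible .
docker run -it --rm --privileged --net=host \
  -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible \
  ansible /opt/openshift-ansible/playbooks/byo/openshift_facts.yml
```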
diff --git a/README.md b/README.md
index 2bdaefd4c..489f9b8e9 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-#openshift-ansible
+#OpenShift and Atomic Enterprise Ansible
-This repo contains OpenShift Ansible code.
+This repo contains Ansible code for OpenShift and Atomic Enterprise.
##Setup
- Install base dependencies:
@@ -23,12 +23,13 @@ This repo contains OpenShift Ansible code.
- Bring your own host deployments:
- [OpenShift Enterprise](README_OSE.md)
- [OpenShift Origin](README_origin.md)
+ - [Atomic Enterprise](README_AEP.md)
- Build
- [How to build the openshift-ansible rpms](BUILD.md)
- Directory Structure:
- - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters
+ - [bin/cluster](bin/cluster) - python script to easily create clusters
- [docs](docs) - Documentation for the project
- [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible
- [inventory/](inventory) - houses Ansible dynamic inventory scripts
@@ -36,6 +37,9 @@ This repo contains OpenShift Ansible code.
- [roles/](roles) - shareable Ansible tasks
##Contributing
+- [Best Practices Guide](docs/best_practices_guide.adoc)
+- [Core Concepts](docs/core_concepts_guide.adoc)
+- [Style Guide](docs/style_guide.adoc)
###Feature Roadmap
Our Feature Roadmap is available on the OpenShift Origin Infrastructure [Trello board](https://trello.com/b/nbkIrqKa/openshift-origin-infrastructure). All ansible items will be tagged with [installv3].
diff --git a/README_AEP.md b/README_AEP.md
new file mode 100644
index 000000000..e29888617
--- /dev/null
+++ b/README_AEP.md
@@ -0,0 +1,240 @@
+# Installing AEP from dev puddles using ansible
+
+* [Requirements](#requirements)
+* [Caveats](#caveats)
+* [Known Issues](#known-issues)
+* [Configuring the host inventory](#configuring-the-host-inventory)
+* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
+* [Running the ansible playbooks](#running-the-ansible-playbooks)
+* [Post-ansible steps](#post-ansible-steps)
+* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
+
+## Requirements
+* ansible
+ * Tested using ansible 1.9.1 and 1.9.2
+  * There is currently a known issue with ansible-1.9.0; you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
+ * Available in Fedora channels
+ * Available for EL with EPEL and Optional channel
+* One or more RHEL 7.1 VMs
+* Either ssh key based auth for the root user or ssh key based auth for a user
+ with sudo access (no password)
+* A checkout of atomic-enterprise-ansible from https://github.com/projectatomic/atomic-enterprise-ansible/
+
+ ```sh
+ git clone https://github.com/projectatomic/atomic-enterprise-ansible.git
+ cd atomic-enterprise-ansible
+ ```
+
+## Caveats
+This ansible repo is currently under heavy revision for providing OSE support;
+the following items are highly likely to change before the OSE support is
+merged into the upstream repo:
+ * the current git branch for testing
+ * how the inventory file should be configured
+ * variables that need to be set
+ * bootstrapping steps
+ * other configuration steps
+
+## Known Issues
+* Host subscriptions are not configurable yet; the hosts need to be
+  pre-registered with subscription-manager or have the RHEL base repo
+  pre-configured. If using subscription-manager, the following commands will
+  disable all but the rhel-7-server-rpms, rhel-7-server-extras-rpms, and
+  rhel-7-server-ose-3.0-rpms repos:
+```sh
+subscription-manager repos --disable="*"
+subscription-manager repos \
+--enable="rhel-7-server-rpms" \
+--enable="rhel-7-server-extras-rpms" \
+--enable="rhel-7-server-ose-3.0-rpms"
+```
+* Configuration of the router is not automated yet
+* Configuration of the docker-registry is not automated yet
+
+## Configuring the host inventory
+[Ansible docs](http://docs.ansible.com/intro_inventory.html)
+
+Below is an example inventory file for configuring one master and two nodes for
+the test environment. The inventory can live in the default inventory file
+(/etc/ansible/hosts) or in a custom file passed to ansible-playbook with the
+--inventory option.
+
+/etc/ansible/hosts:
+```ini
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
+'baseurl':
+'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
+'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name':
+'OpenShift Origin COPR', 'baseurl':
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/',
+'enabled': 1, 'gpgcheck': 1, gpgkey:
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# host group for masters
+[masters]
+ose3-master.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2].example.com
+```
+
+The hostnames above should resolve both from the hosts themselves and
+the host where ansible is running (if different).
+
+## Running the ansible playbooks
+From the atomic-enterprise-ansible checkout run:
+```sh
+ansible-playbook playbooks/byo/config.yml
+```
+**Note:** this assumes that the host inventory is /etc/ansible/hosts; if you are
+using a different inventory file, pass it to ansible-playbook with the -i option.
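For instance, a run against a custom inventory file (the path below is only illustrative) might look like:

```sh
ansible-playbook -i ~/ansible/aep-hosts playbooks/byo/config.yml
```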
+
+## Post-ansible steps
+#### Create the default router
+On the master host:
+```sh
+oadm router --create=true \
+ --credentials=/etc/openshift/master/openshift-router.kubeconfig \
+ --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}'
+```
+
+#### Create the default docker-registry
+On the master host:
+```sh
+oadm registry --create=true \
+ --credentials=/etc/openshift/master/openshift-registry.kubeconfig \
+ --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}' \
+ --mount-host=/var/lib/openshift/docker-registry
+```
+
+## Overriding detected ip addresses and hostnames
+Some deployments will require that the user override the detected hostnames
+and ip addresses for the hosts. To see what the default values will be you can
+run the openshift_facts playbook:
+```sh
+ansible-playbook playbooks/byo/openshift_facts.yml
+```
+The output will be similar to:
+```
+ok: [10.3.9.45] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+ "ip": "172.16.4.79",
+ "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.45",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ... <snip> ...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+ok: [10.3.9.42] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+ "ip": "172.16.4.75",
+ "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.42",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ...<snip>...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+ok: [10.3.9.36] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+ "ip": "172.16.4.73",
+ "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.36",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ...<snip>...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+```
+Now we want to check that the detected common settings match what we expect
+(if not, we can override them).
+
+* hostname
+  * Should resolve to the internal ip from the instances themselves.
+  * openshift_hostname will override.
+* ip
+  * Should be the internal ip of the instance.
+  * openshift_ip will override.
+* public_hostname
+  * Should resolve to the external ip from hosts outside of the cloud provider.
+  * openshift_public_hostname will override.
+* public_ip
+  * Should be the externally accessible ip associated with the instance.
+  * openshift_public_ip will override.
+* use_openshift_sdn
+  * Should be true unless the cloud is GCE.
+  * openshift_use_openshift_sdn overrides.
+
+To override the defaults, you can set the variables in your inventory:
+```
+...snip...
+[masters]
+ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com
+...snip...
+```
diff --git a/README_ANSIBLE_CONTAINER.md b/README_ANSIBLE_CONTAINER.md
new file mode 100644
index 000000000..30c5f8503
--- /dev/null
+++ b/README_ANSIBLE_CONTAINER.md
@@ -0,0 +1,15 @@
+# Running ansible in a docker container
+* Building ansible container:
+
+ ```sh
+ git clone https://github.com/openshift/openshift-ansible.git
+ cd openshift-ansible
+ docker build --rm -t ansible .
+ ```
+* Create the /etc/ansible directory on the host machine and copy the inventory file (hosts) into it.
+* Copy the ssh public key of the host machine to the master and node machines in the cluster (an example follows this list).
+* Running the ansible container:
+
+ ```sh
+ docker run -it --rm --privileged --net=host -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible ansible
+ ```
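The key copy step above can be done with ssh-copy-id; the hostnames below are placeholders for your own master and nodes:

```sh
for host in master.example.com node1.example.com node2.example.com; do
  ssh-copy-id root@"$host"
done
```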
diff --git a/README_AWS.md b/README_AWS.md
index 0e3128a92..c511741b9 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -22,6 +22,27 @@ Note: You must source this file before running any Ansible commands.
Alternatively, you could configure credentials in either ~/.boto or ~/.aws/credentials, see the [boto docs](http://docs.pythonboto.org/en/latest/boto_config_tut.html) for the format.
+Subscribe to CentOS
+-------------------
+
+1. [CentOS on AWS](https://aws.amazon.com/marketplace/pp/B00O7WM7QW)
+
+
+Set up Security Group
+---------------------
+By default, a cluster is launched into the `public` security group. Make sure you allow hosts to talk to each other on port `4789` for SDN.
+You may also want to allow access from the outside world on the following ports:
+
+```
+• 22 - ssh
+• 80 - Web Apps
+• 443 - Web Apps (https)
+• 4789 - SDN / VXLAN
+• 8443 - OpenShift Console
+• 10250 - kubelet
+```
+
+
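If you manage the `public` security group with the AWS CLI, opening one of these ports follows this pattern; the group name, port, and CIDR below are assumptions to adapt to your environment:

```sh
# Allow the OpenShift console port from anywhere; repeat per port as needed.
aws ec2 authorize-security-group-ingress \
  --group-name public \
  --protocol tcp \
  --port 8443 \
  --cidr 0.0.0.0/0
```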
(Optional) Setup your $HOME/.ssh/config file
-------------------------------------------
In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use `.ssh/config`
@@ -40,7 +61,7 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne
By default, a cluster is launched with the following configuration:
-- Instance type: m3.large
+- Instance type: m4.large
- AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments)
- Region: us-east-1
- Keypair name: libra
@@ -62,7 +83,7 @@ Node specific defaults:
If needed, these values can be changed by setting environment variables on your system.
-- export ec2_instance_type='m3.large'
+- export ec2_instance_type='m4.large'
- export ec2_image='ami-307b3658'
- export ec2_region='us-east-1'
- export ec2_keypair='libra'
@@ -130,3 +151,21 @@ The --deployment-type flag can be passed to bin/cluster to specify the deploymen
bin/cluster create aws --deployment-type=online <cluster-id>
```
Note: If no deployment type is specified, then the default is origin.
+
+
+## Post-ansible steps
+Create the default router
+-------------------------
+On the master host:
+```sh
+oadm router --create=true \
+ --credentials=/etc/openshift/master/openshift-router.kubeconfig
+```
+
+Create the default docker-registry
+----------------------------------
+On the master host:
+```sh
+oadm registry --create=true \
+ --credentials=/etc/openshift/master/openshift-registry.kubeconfig
+``` \ No newline at end of file
diff --git a/README_origin.md b/README_origin.md
index fb0931132..f13fe660a 100644
--- a/README_origin.md
+++ b/README_origin.md
@@ -99,10 +99,13 @@ oadm router --create=true \
On the master host:
```sh
oadm registry --create=true \
- --credentials=/etc/openshift/master/openshift-registry.kubeconfig \
- --mount-host=/var/lib/openshift/docker-registry
+ --credentials=/etc/openshift/master/openshift-registry.kubeconfig
```
+If you would like persistent storage, refer to the
+[OpenShift documentation](https://docs.openshift.org/latest/admin_guide/install/docker_registry.html)
+for more information on deployment options for the built-in docker-registry.
+
## Overriding detected ip addresses and hostnames
Some deployments will require that the user override the detected hostnames
and ip addresses for the hosts. To see what the default values will be you can
diff --git a/README_vagrant.md b/README_vagrant.md
index 26ec52c0a..5f87d6633 100644
--- a/README_vagrant.md
+++ b/README_vagrant.md
@@ -2,9 +2,28 @@ Requirements
------------
- vagrant (tested against version 1.7.2)
- vagrant-hostmanager plugin (tested against version 1.5.0)
+- vagrant-registration plugin (only required for enterprise deployment type)
- vagrant-libvirt (tested against version 0.0.26)
- Only required if using libvirt instead of virtualbox
+For ``enterprise`` deployment types the base RHEL box has to be added to Vagrant:
+
+1. Download the RHEL7 vagrant image (libvirt or virtualbox) available from the [Red Hat Container Development Kit downloads in the customer portal](https://access.redhat.com/downloads/content/293/ver=1/rhel---7/1.0.1/x86_64/product-downloads)
+
+2. Install it into vagrant
+
+ ``$ vagrant box add --name rhel-7 /path/to/rhel-server-libvirt-7.1-3.x86_64.box``
+
+3. (optional, recommended) Increase the disk size of the image to 20GB. This is a two-step process (these instructions are specific to libvirt):
+
+ Resize the actual qcow2 image:
+
+ ``$ qemu-img resize ~/.vagrant.d/boxes/rhel-7/0/libvirt/box.img 20GB``
+
+ Edit `~/.vagrant.d/boxes/rhel-7/0/libvirt/metadata.json` to reflect the new size. A corrected metadata.json looks like this:
+
+ ``{"provider": "libvirt", "format": "qcow2", "virtual_size": 20}``
+
Usage
-----
```
@@ -21,5 +40,10 @@ vagrant provision
Environment Variables
---------------------
The following environment variables can be overridden:
-- OPENSHIFT_DEPLOYMENT_TYPE (defaults to origin, choices: origin, enterprise, online)
-- OPENSHIFT_NUM_NODES (the number of nodes to create, defaults to 2)
+- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online)
+- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2)
+
+For ``enterprise`` deployment types these environment variables should also be
+specified (an example invocation is shown below):
+- ``rhel_subscription_user``: rhsm user
+- ``rhel_subscription_pass``: rhsm password
+- (optional) ``rhel_subscription_pool``: pool ID used to attach a specific subscription beyond what auto-attach detects
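A minimal sketch of an ``enterprise`` run, assuming the RHEL box from step 2 above has already been added and using placeholder credentials:

```sh
export OPENSHIFT_DEPLOYMENT_TYPE=enterprise
export rhel_subscription_user=jdoe@example.com
export rhel_subscription_pass=secret
# Optional: attach a specific pool instead of relying on auto-attach.
export rhel_subscription_pool=1234567890abcdef
vagrant up
```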
diff --git a/Vagrantfile b/Vagrantfile
index a832ae84e..4675b5d60 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -15,6 +15,28 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.hostmanager.manage_host = true
config.hostmanager.include_offline = true
config.ssh.insert_key = false
+
+ if deployment_type === 'enterprise'
+ unless Vagrant.has_plugin?('vagrant-registration')
+ raise 'vagrant-registration-plugin is required for enterprise deployment'
+ end
+ username = ENV['rhel_subscription_user']
+ password = ENV['rhel_subscription_pass']
+ unless username and password
+ raise 'rhel_subscription_user and rhel_subscription_pass are required'
+ end
+ config.registration.username = username
+ config.registration.password = password
+ # FIXME this is temporary until vagrant/ansible registration modules
+ # are capable of handling specific subscription pools
+ if not ENV['rhel_subscription_pool'].nil?
+ config.vm.provision "shell" do |s|
+ s.inline = "subscription-manager attach --pool=$1 || true"
+ s.args = "#{ENV['rhel_subscription_pool']}"
+ end
+ end
+ end
+
config.vm.provider "virtualbox" do |vbox, override|
override.vm.box = "chef/centos-7.1"
vbox.memory = 1024
@@ -28,10 +50,15 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
libvirt.cpus = 2
libvirt.memory = 1024
libvirt.driver = 'kvm'
- override.vm.box = "centos-7.1"
- override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box"
- override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05"
- override.vm.box_download_checksum_type = "sha256"
+ case deployment_type
+ when "enterprise"
+ override.vm.box = "rhel-7"
+ when "origin"
+ override.vm.box = "centos-7.1"
+ override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box"
+ override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05"
+ override.vm.box_download_checksum_type = "sha256"
+ end
end
num_nodes.times do |n|
@@ -53,12 +80,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
ansible.sudo = true
ansible.groups = {
"masters" => ["master"],
- "nodes" => ["node1", "node2"],
+ "nodes" => ["master", "node1", "node2"],
}
ansible.extra_vars = {
- openshift_deployment_type: "origin",
+ deployment_type: deployment_type,
}
- ansible.playbook = "playbooks/byo/config.yml"
+ ansible.playbook = "playbooks/byo/vagrant.yml"
end
end
end
diff --git a/bin/cluster b/bin/cluster
index 7eb4a4448..c80fe0cab 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -23,6 +23,16 @@ class Cluster(object):
'-o ControlMaster=auto '
'-o ControlPersist=600s '
)
+ # Because of `UserKnownHostsFile=/dev/null`
+ # our `.ssh/known_hosts` file most probably misses the ssh host public keys
+ # of our servers.
+ # In that case, ansible serializes the execution of ansible modules
+ # because we might be interactively prompted to accept the ssh host public keys.
+ # Because of `StrictHostKeyChecking=no` we know that we won't be prompted
+ # So, we don't want our modules execution to be serialized.
+ os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ # TODO: A more secure way to proceed would consist in dynamically
+ # retrieving the ssh host public keys from the IaaS interface
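A rough interim alternative, until host keys are pulled from the IaaS interface as the TODO above suggests, is to seed `known_hosts` yourself before running ansible. This assumes also dropping the `UserKnownHostsFile=/dev/null` option; the hostnames below are placeholders:

```sh
# Record hashed host keys for the freshly launched instances.
for host in master1.example.com node1.example.com node2.example.com; do
  ssh-keyscan -H "$host" >> ~/.ssh/known_hosts
done
```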
def get_deployment_type(self, args):
"""
diff --git a/bin/openshift_ansible/aws b/bin/openshift_ansible/aws
new file mode 120000
index 000000000..eb0575b4d
--- /dev/null
+++ b/bin/openshift_ansible/aws
@@ -0,0 +1 @@
+../../inventory/aws/ \ No newline at end of file
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 1467de858..5072161f0 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,6 +1,12 @@
__ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ if python -c 'import openshift_ansible' &>/dev/null; then
+ /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
+ elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
+ elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
fi
}
@@ -19,8 +25,15 @@ _ossh()
complete -F _ossh ossh oscp
__opssh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ if python -c 'import openshift_ansible' &>/dev/null; then
+ /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+
+ elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+
+ elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+    /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+
fi
}
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index 6ab930dc4..44500c618 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -1,9 +1,16 @@
#compdef ossh oscp
_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
- fi
+ if python -c 'import openshift_ansible' &>/dev/null; then
+ print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+
+ elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+
+ elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+
+ fi
}
_ossh(){
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
index a146b93ad..08d95b2b8 100644
--- a/docs/best_practices_guide.adoc
+++ b/docs/best_practices_guide.adoc
@@ -421,7 +421,7 @@ For consistency, role names SHOULD follow the above naming pattern. It is import
Many times the `technology` portion of the pattern will line up with a package name. It is advised that whenever possible, the package name should be used.
.Examples:
-* The role to configure an OpenShift Master is called `openshift_master`
+* The role to configure a master is called `openshift_master`
* The role to configure OpenShift specific yum repositories is called `openshift_repos`
=== Filters
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 47033a88e..c3408702d 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -132,6 +132,16 @@ class FilterModule(object):
return rval
@staticmethod
+ def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
+ '''Take a dict in the form of { 'key': 'value', 'key': 'value' } and
+ arrange them as a string 'key=value key=value'
+ '''
+ if not issubclass(type(data), dict):
+ raise errors.AnsibleFilterError("|failed expects first param is a dict")
+
+ return out_joiner.join([in_joiner.join([k, v]) for k, v in data.items()])
+
+ @staticmethod
def oo_ami_selector(data, image_name):
''' This takes a list of amis and an image name and attempts to return
the latest ami.
@@ -309,6 +319,7 @@ class FilterModule(object):
"oo_ami_selector": self.oo_ami_selector,
"oo_ec2_volume_definition": self.oo_ec2_volume_definition,
"oo_combine_key_value": self.oo_combine_key_value,
+ "oo_combine_dict": self.oo_combine_dict,
"oo_split": self.oo_split,
"oo_filter_list": self.oo_filter_list,
"oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
diff --git a/git/.pylintrc b/git/.pylintrc
index af8f1656f..fe6eef6de 100644
--- a/git/.pylintrc
+++ b/git/.pylintrc
@@ -71,7 +71,7 @@ confidence=
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
# w0511 - fixme - disabled because TODOs are acceptable
-disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511,R0801
[REPORTS]
@@ -205,7 +205,7 @@ docstring-min-length=-1
[SIMILARITIES]
# Minimum lines number of a similarity.
-min-similarity-lines=4
+min-similarity-lines=0
# Ignore comments when computing similarities.
ignore-comments=yes
@@ -214,7 +214,7 @@ ignore-comments=yes
ignore-docstrings=yes
# Ignore imports when computing similarities.
-ignore-imports=no
+ignore-imports=yes
[VARIABLES]
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
index 34a4396bd..bf4e0845a 100644
--- a/inventory/aws/hosts/hosts
+++ b/inventory/aws/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 56f4da5a2..646790c42 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -33,7 +33,7 @@ deployment_type=enterprise
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
# htpasswd auth
-#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
@@ -54,6 +54,15 @@ deployment_type=enterprise
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_defer_ha=True
+# default subdomain to use for exposed routes
+#osm_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
index 34a4396bd..bf4e0845a 100644
--- a/inventory/gce/hosts/hosts
+++ b/inventory/gce/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
index 34a4396bd..bf4e0845a 100644
--- a/inventory/libvirt/hosts/hosts
+++ b/inventory/libvirt/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index b7ce9e5dc..2cbf33473 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -78,7 +78,7 @@ class MultiEc2(object):
},
]
- self.config['cache_max_age'] = 0
+ self.config['cache_max_age'] = 300
else:
raise RuntimeError("Could not find valid ec2 credentials in the environment.")
diff --git a/inventory/openstack/hosts/hosts b/inventory/openstack/hosts/hosts
index 9cdc31449..2d2194a4d 100644
--- a/inventory/openstack/hosts/hosts
+++ b/inventory/openstack/hosts/hosts
@@ -1 +1 @@
-localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 connection=local
+localhost ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2' connection=local
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
new file mode 100644
index 000000000..684a0ca72
--- /dev/null
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -0,0 +1,142 @@
+---
+#example run:
+# ansible-playbook -e "cli_volume_size=1" \
+# -e "cli_device_name=/dev/xvdf" \
+# -e "cli_hosttype=master" \
+# -e "cli_environment=ops" \
+# create_pv.yaml
+# FIXME: we need to change "environment" to "clusterid" as that's what it really is now.
+#
+- name: Create a volume and attach it to master
+ hosts: localhost
+ gather_facts: no
+ vars:
+ cli_volume_type: gp2
+ cli_volume_iops: ''
+ oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
+ intersect(groups['tag_environment_' ~ cli_environment]) |
+ first }}"
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_volume_size
+ - cli_device_name
+ - cli_hosttype
+ - cli_environment
+
+ - name: set oo_name fact
+ set_fact:
+ oo_name: "{{ oo_name }}"
+
+
+ - name: Select a single master to run this on
+ add_host:
+ hostname: "{{ oo_name }}"
+ ansible_ssh_host: "{{ hostvars[oo_name].ec2_public_dns_name }}"
+ groups: oo_master
+
+ - name: Create a volume and attach it
+ ec2_vol:
+ state: present
+ instance: "{{ hostvars[oo_name]['ec2_id'] }}"
+ region: "{{ hostvars[oo_name]['ec2_region'] }}"
+ volume_size: "{{ cli_volume_size }}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: "{{ cli_device_name }}"
+ iops: "{{ cli_volume_iops }}"
+ register: vol
+
+ - debug: var=vol
+
+- name: Configure the drive
+ gather_facts: no
+ hosts: oo_master
+ user: root
+ connection: ssh
+ vars:
+ pv_tmpdir: /tmp/persistentvolumes
+
+ post_tasks:
+ - name: Setting facts for template
+ set_fact:
+ pv_name: "pv-{{cli_volume_size}}-{{ hostvars[hostvars.localhost.oo_name]['ec2_tag_Name'] }}-{{hostvars.localhost.vol.volume_id }}"
+ vol_az: "{{ hostvars[hostvars.localhost.oo_name]['ec2_placement'] }}"
+ vol_id: "{{ hostvars.localhost.vol.volume_id }}"
+ vol_size: "{{ cli_volume_size }}"
+ pv_mntdir: "{{ pv_tmpdir }}/mnt-{{ 1000 | random }}"
+
+ - set_fact:
+ pv_template: "{{ pv_tmpdir }}/{{ pv_name }}.yaml"
+
+ - name: "Mkdir {{ pv_tmpdir }}"
+ file:
+ state: directory
+ path: "{{ pv_tmpdir }}"
+ mode: '0750'
+
+ - name: "Mkdir {{ pv_mntdir }}"
+ file:
+ state: directory
+ path: "{{ pv_mntdir }}"
+ mode: '0750'
+
+ - name: Create pv file from template
+ template:
+ src: ./pv-template.j2
+ dest: "{{ pv_template }}"
+ owner: root
+ mode: '0640'
+
+ - name: mkfs
+ filesystem:
+ dev: "{{ cli_device_name }}"
+ fstype: ext4
+
+ - name: Mount the dev
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ fstype: ext4
+ state: mounted
+
+ - name: chgrp g+rwXs
+ file:
+ path: "{{ pv_mntdir }}"
+ mode: 'g+rwXs'
+ recurse: yes
+ seuser: system_u
+ serole: object_r
+ setype: svirt_sandbox_file_t
+ selevel: s0
+
+ - name: umount
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ state: unmounted
+ fstype: ext4
+
+ - name: detach drive
+ delegate_to: localhost
+ ec2_vol:
+ region: "{{ hostvars[hostvars.localhost.oo_name].ec2_region }}"
+ id: "{{ hostvars.localhost.vol.volume_id }}"
+ instance: None
+
+ - name: "Remove {{ pv_mntdir }}"
+ file:
+ state: absent
+ path: "{{ pv_mntdir }}"
+
+ # We have to use the shell module because we can't set env vars with the command module.
+ - name: "Place PV into oc"
+ shell: "KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc create -f {{ pv_template | quote }}"
+ register: oc_output
+
+ - debug: var=oc_output
+
+ - fail:
+ msg: "Failed to add {{ pv_template }} to master."
+ when: oc_output.rc != 0
diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2
new file mode 100644
index 000000000..5654ef6c4
--- /dev/null
+++ b/playbooks/adhoc/create_pv/pv-template.j2
@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: {{ pv_name }}
+ labels:
+ type: ebs
+spec:
+ capacity:
+ storage: {{ vol_size }}Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Recycle
+ awsElasticBlockStore:
+ volumeID: aws://{{ vol_az }}/{{ vol_id }}
+ fsType: ext4
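Once the create_pv playbook has run, the new PersistentVolume can be checked from the master; the KUBECONFIG path matches the one used by the playbook above:

```sh
KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc get pv
```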
diff --git a/playbooks/adhoc/deploy_monitoring_containers/deploy.yml b/playbooks/adhoc/deploy_monitoring_containers/deploy.yml
deleted file mode 100644
index 44df693d5..000000000
--- a/playbooks/adhoc/deploy_monitoring_containers/deploy.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- name: Setup hosts
- hosts: localhost
- gather_facts: no
- user: root
- tasks:
- - name: build inven
- add_host: "name={{ hostvars[item]['ec2_public_dns_name'] }} groups=oo_hosts"
- with_items: groups['tag_env-host-type_kwoodsontest2-openshift-node']
-
- - debug: msg=oo_hosts
-
-- name: Deploy host-monitoring
- hosts: oo_hosts
- user: root
- tasks:
- - name: Deploy docker oso-f22-host-monitoring
- command: docker pull docker-registry.ops.rhcloud.com/ops/oso-f22-host-monitoring
-
- - name: Deploy oso-rhel7-zagg-client
- command: docker pull docker-registry.ops.rhcloud.com/ops/oso-rhel7-zagg-client
-
- - name: Copy oso-f22-host-monitoring systemd file
- copy:
- src: oso-f22-host-monitoring.service
- dest: /etc/systemd/system/oso-f22-host-monitoring.service
- owner: root
- group: root
- mode: 0644
- register: pcp_systemd
-
- - name: Copy zagg-client systemd file
- copy:
- src: oso-rhel7-zagg-client.service
- dest: /etc/systemd/system/oso-rhel7-zagg-client.service
- owner: root
- group: root
- mode: 0644
- register: zagg_systemd
-
- - name: reload systemd
- command: /usr/bin/systemctl --system daemon-reload
- when: pcp_systemd.changed or zagg_systemd.changed
-
- - name: pasue for a few seconds
- pause: seconds=5
-
- - name: Start the oso-f22-host-monitoring service
- service:
- name: oso-f22-host-monitoring
- state: started
- enabled: yes
-
- - name: Start the oso-rhel7-zagg-client service
- service:
- name: oso-rhel7-zagg-client
- state: started
- enabled: yes
diff --git a/playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service b/playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service
deleted file mode 100644
index 852be09b6..000000000
--- a/playbooks/adhoc/deploy_monitoring_containers/oso-f22-host-monitoring.service
+++ /dev/null
@@ -1,36 +0,0 @@
-# This is a systemd file to run this docker container under systemd.
-# To make this work:
-# * pull the image (probably from ops docker registry)
-# * place this file in /etc/systemd/system without the .systemd extension
-# * run the commands:
-# systemctl daemon-reload
-# systemctl enable pcp-docker
-# systemctl start pcp-docker
-#
-#
-[Unit]
-Description=PCP Collector Contatainer
-Requires=docker.service
-After=docker.service
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-#Slice=container-small.slice
-
-ExecStartPre=-/usr/bin/docker rm "oso-f22-host-monitoring"
-
-ExecStart=/usr/bin/docker run --rm --name=oso-f22-host-monitoring \
- --privileged --net=host --pid=host --ipc=host \
- -v /sys:/sys:ro -v /etc/localtime:/etc/localtime:ro \
- -v /var/lib/docker:/var/lib/docker:ro -v /run:/run \
- -v /var/log:/var/log \
- docker-registry.ops.rhcloud.com/ops/oso-f22-host-monitoring
-
-ExecReload=-/usr/bin/docker stop "oso-f22-host-monitoring"
-ExecReload=-/usr/bin/docker rm "oso-f22-host-monitoring"
-ExecStop=-/usr/bin/docker stop "oso-f22-host-monitoring"
-
-[Install]
-WantedBy=default.target
diff --git a/playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service b/playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service
deleted file mode 100644
index 381c7b487..000000000
--- a/playbooks/adhoc/deploy_monitoring_containers/oso-rhel7-zagg-client.service
+++ /dev/null
@@ -1,39 +0,0 @@
-# This is a systemd file to run this docker container under systemd.
-# To make this work:
-# * pull the image (probably from ops docker registry)
-# * place this file in /etc/systemd/system without the .systemd extension
-# * run the commands:
-# systemctl daemon-reload
-# systemctl enable zagg-client-docker
-# systemctl start zagg-client-docker
-#
-#
-[Unit]
-Description=Zagg Client Contatainer
-Requires=docker.service
-After=docker.service
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-#Slice=container-small.slice
-
-ExecStartPre=-/usr/bin/docker rm "oso-rhel7-zagg-client"
-
-
-ExecStart=/usr/bin/docker run --name oso-rhel7-zagg-client \
- -e ZAGG_SERVER=SERVERNAME \
- -e ZAGG_USER=USERNAME \
- -e ZAGG_PASSWORD=PASSWORD \
- -v /etc/localtime:/etc/localtime \
- -v /run/pcp:/run/pcp \
- docker-registry.ops.rhcloud.com/ops/oso-rhel7-zagg-client
-
-
-ExecReload=-/usr/bin/docker stop "oso-rhel7-zagg-client"
-ExecReload=-/usr/bin/docker rm "oso-rhel7-zagg-client"
-ExecStop=-/usr/bin/docker stop "oso-rhel7-zagg-client"
-
-[Install]
-WantedBy=default.target
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
index bd71e6d1d..a31cbef65 100644
--- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
+++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
@@ -2,65 +2,50 @@
- hosts: localhost
gather_facts: no
vars:
- g_zserver: http://oso-rhel7-zabbix-web.kwoodsontest2.opstest.online.openshift.com/zabbix/api_jsonrpc.php
+ g_zserver: http://localhost/zabbix/api_jsonrpc.php
g_zuser: Admin
g_zpassword: zabbix
roles:
- - ../roles/os_zabbix
+ - ../../../roles/os_zabbix
post_tasks:
- - zbxapi:
+ - zbx_template:
server: "{{ g_zserver }}"
user: "{{ g_zuser }}"
password: "{{ g_zpassword }}"
- zbx_class: Template
state: list
- params:
- output: extend
- search:
- host: 'Template Heartbeat'
+ name: 'Template Heartbeat'
register: templ_heartbeat
- - zbxapi:
+ - zbx_template:
server: "{{ g_zserver }}"
user: "{{ g_zuser }}"
password: "{{ g_zpassword }}"
- zbx_class: Template
state: list
- params:
- output: extend
- search:
- host: 'Template App Zabbix Server'
+ name: 'Template App Zabbix Server'
register: templ_zabbix_server
- - zbxapi:
+ - zbx_template:
server: "{{ g_zserver }}"
user: "{{ g_zuser }}"
password: "{{ g_zpassword }}"
- zbx_class: Template
state: list
- params:
- output: extend
- search:
- host: 'Template App Zabbix Agent'
+ name: 'Template App Zabbix Agent'
register: templ_zabbix_agent
- - zbxapi:
+ - zbx_template:
server: "{{ g_zserver }}"
user: "{{ g_zuser }}"
password: "{{ g_zpassword }}"
- zbx_class: Template
state: list
register: templates
- debug: var=templ_heartbeat.results
- - zbxapi:
+ - zbx_template:
server: "{{ g_zserver }}"
user: "{{ g_zuser }}"
password: "{{ g_zpassword }}"
- zbx_class: Template
state: absent
- params: "{{templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('templateid') }}"
- register: template_results
+ with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/create_app.yml b/playbooks/adhoc/zabbix_setup/create_app.yml
deleted file mode 100644
index 3a08b2301..000000000
--- a/playbooks/adhoc/zabbix_setup/create_app.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- vars_files:
- - vars/template_heartbeat.yml
- - vars/template_os_linux.yml
- vars:
- g_zserver: http://oso-rhel7-zabbix-web.kwoodsontest2.opstest.online.openshift.com/zabbix/api_jsonrpc.php
- g_zuser: Admin
- g_zpassword: zabbix
- roles:
- - ../roles/os_zabbix
- post_tasks:
- - zbxapi:
- server: "{{ g_zserver }}"
- user: "{{ g_zuser }}"
- password: "{{ g_zpassword }}"
- zbx_class: Template
- state: list
- params:
- output: extend
- register: templates
-
- - debug: var=templates
-
- - name: Create app
- include: create_application.yml
- vars:
- ctp_template: "{{ g_template_heartbeat }}"
- ctp_zserver: "{{ g_zserver }}"
- ctp_zuser: "{{ g_zuser }}"
- ctp_zpassword: "{{ g_zpassword }}"
-
-
diff --git a/playbooks/adhoc/zabbix_setup/create_application.yml b/playbooks/adhoc/zabbix_setup/create_application.yml
deleted file mode 100644
index aa6c40ed8..000000000
--- a/playbooks/adhoc/zabbix_setup/create_application.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- debug: var=ctp_template
-
-- name: Create Application
- zbxapi:
- server: "{{ ctp_zserver }}"
- user: "{{ ctp_zuser }}"
- password: "{{ ctp_zpassword }}"
- zbx_class: Application
- state: present
- params:
- name: "{{ ctp_template.application['name'] }}"
- hostid: 10085
- search:
- name: "{{ ctp_template.application['name'] }}"
- register: ctp_created_application
-
-- debug: var=ctp_created_application
diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml
index b055e78eb..50fff53b2 100644
--- a/playbooks/adhoc/zabbix_setup/create_template.yml
+++ b/playbooks/adhoc/zabbix_setup/create_template.yml
@@ -2,16 +2,14 @@
- debug: var=ctp_template
- name: Create Template
- zbxapi:
+ zbx_template:
server: "{{ ctp_zserver }}"
user: "{{ ctp_zuser }}"
password: "{{ ctp_zpassword }}"
- zbx_class: Template
- state: present
- params: "{{ ctp_template.params }}"
- register: ctp_created_templates
+ name: "{{ ctp_template.name }}"
+ register: ctp_created_template
-- debug: var=ctp_created_templates
+- debug: var=ctp_created_template
#- name: Create Application
# zbxapi:
@@ -22,7 +20,7 @@
# state: present
# params:
# name: "{{ ctp_template.application.name}}"
-# hostid: "{{ ctp_created_templates.results[0].templateid }}"
+# hostid: "{{ ctp_created_template.results[0].templateid }}"
# search:
# name: "{{ ctp_template.application.name}}"
# register: ctp_created_application
@@ -30,28 +28,28 @@
#- debug: var=ctp_created_application
- name: Create Items
- zbxapi:
+ zbx_item:
server: "{{ ctp_zserver }}"
user: "{{ ctp_zuser }}"
password: "{{ ctp_zpassword }}"
- zbx_class: Item
- state: present
- params: "{{ item | oo_set_zbx_item_hostid(ctp_created_templates.results) }}"
+ key: "{{ item.key }}"
+ name: "{{ item.name | default(item.key, true) }}"
+ value_type: "{{ item.value_type | default('int') }}"
+ template_name: "{{ ctp_template.name }}"
with_items: ctp_template.zitems
register: ctp_created_items
#- debug: var=ctp_created_items
- name: Create Triggers
- zbxapi:
+ zbx_trigger:
server: "{{ ctp_zserver }}"
user: "{{ ctp_zuser }}"
password: "{{ ctp_zpassword }}"
- zbx_class: Trigger
- state: present
- params: "{{ item }}"
+ description: "{{ item.description }}"
+ expression: "{{ item.expression }}"
+ priority: "{{ item.priority }}"
with_items: ctp_template.ztriggers
- register: ctp_created_triggers
when: ctp_template.ztriggers is defined
#- debug: var=ctp_created_triggers
diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml
index 286f699e5..1729194b5 100644
--- a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml
+++ b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml
@@ -5,20 +5,17 @@
- vars/template_heartbeat.yml
- vars/template_os_linux.yml
vars:
- g_zserver: http://oso-rhel7-zabbix-web.kwoodsontest2.opstest.online.openshift.com/zabbix/api_jsonrpc.php
+ g_zserver: http://localhost/zabbix/api_jsonrpc.php
g_zuser: Admin
g_zpassword: zabbix
roles:
- - ../roles/os_zabbix
+ - ../../../roles/os_zabbix
post_tasks:
- - zbxapi:
+ - zbx_template:
server: "{{ g_zserver }}"
user: "{{ g_zuser }}"
password: "{{ g_zpassword }}"
- zbx_class: Template
state: list
- params:
- output: extend
register: templates
- debug: var=templates
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml
index 9d6145ec4..22cc75554 100644
--- a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml
+++ b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml
@@ -1,33 +1,11 @@
---
g_template_heartbeat:
- application:
- name: Heartbeat
-#output: extend
- search:
- name: Heartbeat
- params:
- name: Template Heartbeat
- host: Template Heartbeat
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template Heartbeat
+ name: Template Heartbeat
zitems:
- name: Heartbeat Ping
hostid:
- key_: heartbeat.ping
- type: 2
- value_type: 1
- output: extend
- search:
- key_: heartbeat.ping
- selectApplications: extend
+ key: heartbeat.ping
ztriggers:
- description: 'Heartbeat.ping has failed on {HOST.NAME}'
expression: '{Template Heartbeat:heartbeat.ping.last()}<>0'
- priority: 3
- searchWildcardsEnabled: True
- search:
- description: 'Heartbeat.ping has failed on*'
- expandExpression: True
+ priority: avg
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml
index b89711632..9cc038ffa 100644
--- a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml
+++ b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml
@@ -1,248 +1,90 @@
---
g_template_os_linux:
- application:
- name: OS Linux
- output: extend
- search:
- name: OS Linux
- params:
- name: Template OS Linux
- host: Template OS Linux
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template OS Linux
+ name: Template OS Linux
zitems:
- - hostid: null
- key_: kernel.uname.sysname
- name: kernel.uname.sysname
- search:
- key_: kernel.uname.sysname
- type: 2
- value_type: 4
- selectApplications: extend
- - hostid: null
- key_: kernel.all.cpu.wait.total
- name: kernel.all.cpu.wait.total
- search:
- key_: kernel.all.cpu.wait.total
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.cpu.irq.hard
- name: kernel.all.cpu.irq.hard
- search:
- key_: kernel.all.cpu.irq.hard
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.cpu.idle
- name: kernel.all.cpu.idle
- search:
- key_: kernel.all.cpu.idle
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.uname.distro
- name: kernel.uname.distro
- search:
- key_: kernel.uname.distro
- type: 2
- value_type: 4
- selectApplications: extend
- - hostid: null
- key_: kernel.uname.nodename
- name: kernel.uname.nodename
- search:
- key_: kernel.uname.nodename
- type: 2
- value_type: 4
- selectApplications: extend
- - hostid: null
- key_: kernel.all.cpu.irq.soft
- name: kernel.all.cpu.irq.soft
- search:
- key_: kernel.all.cpu.irq.soft
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.load.15_minute
- name: kernel.all.load.15_minute
- search:
- key_: kernel.all.load.15_minute
- type: 2
- value_type: 0
- selectApplications: extend
- - hostid: null
- key_: kernel.all.cpu.sys
- name: kernel.all.cpu.sys
- search:
- key_: kernel.all.cpu.sys
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.load.5_minute
- name: kernel.all.load.5_minute
- search:
- key_: kernel.all.load.5_minute
- type: 2
- value_type: 0
- selectApplications: extend
- - hostid: null
- key_: mem.freemem
- name: mem.freemem
- search:
- key_: mem.freemem
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.cpu.nice
- name: kernel.all.cpu.nice
- search:
- key_: kernel.all.cpu.nice
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: mem.util.bufmem
- name: mem.util.bufmem
- search:
- key_: mem.util.bufmem
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: swap.used
- name: swap.used
- search:
- key_: swap.used
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.load.1_minute
- name: kernel.all.load.1_minute
- search:
- key_: kernel.all.load.1_minute
- type: 2
- value_type: 0
- selectApplications: extend
- - hostid: null
- key_: kernel.uname.version
- name: kernel.uname.version
- search:
- key_: kernel.uname.version
- type: 2
- value_type: 4
- selectApplications: extend
- - hostid: null
- key_: swap.length
- name: swap.length
- search:
- key_: swap.length
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: mem.physmem
- name: mem.physmem
- search:
- key_: mem.physmem
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.uptime
- name: kernel.all.uptime
- search:
- key_: kernel.all.uptime
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: swap.free
- name: swap.free
- search:
- key_: swap.free
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: mem.util.used
- name: mem.util.used
- search:
- key_: mem.util.used
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.cpu.user
- name: kernel.all.cpu.user
- search:
- key_: kernel.all.cpu.user
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.uname.machine
- name: kernel.uname.machine
- search:
- key_: kernel.uname.machine
- type: 2
- value_type: 4
- selectApplications: extend
- - hostid: null
- key_: hinv.ncpu
- name: hinv.ncpu
- search:
- key_: hinv.ncpu
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: mem.util.cached
- name: mem.util.cached
- search:
- key_: mem.util.cached
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.cpu.steal
- name: kernel.all.cpu.steal
- search:
- key_: kernel.all.cpu.steal
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.all.pswitch
- name: kernel.all.pswitch
- search:
- key_: kernel.all.pswitch
- type: 2
- value_type: 3
- selectApplications: extend
- - hostid: null
- key_: kernel.uname.release
- name: kernel.uname.release
- search:
- key_: kernel.uname.release
- type: 2
- value_type: 4
- selectApplications: extend
- - hostid: null
- key_: proc.nprocs
- name: proc.nprocs
- search:
- key_: proc.nprocs
- type: 2
- value_type: 3
- selectApplications: extend
+ - key: kernel.uname.sysname
+ value_type: string
+
+ - key: kernel.all.cpu.wait.total
+ value_type: int
+
+ - key: kernel.all.cpu.irq.hard
+ value_type: int
+
+ - key: kernel.all.cpu.idle
+ value_type: int
+
+ - key: kernel.uname.distro
+ value_type: string
+
+ - key: kernel.uname.nodename
+ value_type: string
+
+ - key: kernel.all.cpu.irq.soft
+ value_type: int
+
+ - key: kernel.all.load.15_minute
+ value_type: float
+
+ - key: kernel.all.cpu.sys
+ value_type: int
+
+ - key: kernel.all.load.5_minute
+ value_type: float
+
+ - key: mem.freemem
+ value_type: int
+
+ - key: kernel.all.cpu.nice
+ value_type: int
+
+ - key: mem.util.bufmem
+ value_type: int
+
+ - key: swap.used
+ value_type: int
+
+ - key: kernel.all.load.1_minute
+ value_type: float
+
+ - key: kernel.uname.version
+ value_type: string
+
+ - key: swap.length
+ value_type: int
+
+ - key: mem.physmem
+ value_type: int
+
+ - key: kernel.all.uptime
+ value_type: int
+
+ - key: swap.free
+ value_type: int
+
+ - key: mem.util.used
+ value_type: int
+
+ - key: kernel.all.cpu.user
+ value_type: int
+
+ - key: kernel.uname.machine
+ value_type: string
+
+ - key: hinv.ncpu
+ value_type: int
+
+ - key: mem.util.cached
+ value_type: int
+
+ - key: kernel.all.cpu.steal
+ value_type: int
+
+ - key: kernel.all.pswitch
+ value_type: int
+
+ - key: kernel.uname.release
+ value_type: string
+
+ - key: proc.nprocs
+ value_type: int
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 6ee539c7e..8106d5da9 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -15,6 +15,7 @@
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
index 29e594a65..2b1db62d8 100644
--- a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
+++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
@@ -158,7 +158,7 @@ EXAMPLES = '''
# Launch an EC2 instance
- ec2:
image: "{{ ami_search.results[0].ami_id }}"
- instance_type: m3.medium
+ instance_type: m4.medium
key_name: mykey
wait: yes
'''
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 92155582e..236d84e74 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -99,7 +99,7 @@
iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
node:
root:
- volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+ volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
docker:
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
index fc8b8d2d2..bb18e13b0 100644
--- a/playbooks/aws/openshift-cluster/vars.online.int.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -3,13 +3,13 @@ ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: m3.large
+ec2_master_instance_type: t2.small
ec2_master_security_groups: [ 'integration', 'integration-master' ]
-ec2_infra_instance_type: m3.large
+ec2_infra_instance_type: c4.large
ec2_infra_security_groups: [ 'integration', 'integration-infra' ]
-ec2_node_instance_type: m3.large
+ec2_node_instance_type: m4.large
ec2_node_security_groups: [ 'integration', 'integration-node' ]
-ec2_etcd_instance_type: m3.large
+ec2_etcd_instance_type: m4.large
ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ]
ec2_vpc_subnet: subnet-987c0def
ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
index f68d41fc4..bbef9cc56 100644
--- a/playbooks/aws/openshift-cluster/vars.online.prod.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -3,13 +3,13 @@ ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: m3.large
+ec2_master_instance_type: t2.small
ec2_master_security_groups: [ 'production', 'production-master' ]
-ec2_infra_instance_type: m3.large
+ec2_infra_instance_type: c4.large
ec2_infra_security_groups: [ 'production', 'production-infra' ]
-ec2_node_instance_type: m3.large
+ec2_node_instance_type: m4.large
ec2_node_security_groups: [ 'production', 'production-node' ]
-ec2_etcd_instance_type: m3.large
+ec2_etcd_instance_type: m4.large
ec2_etcd_security_groups: [ 'production', 'production-etcd' ]
ec2_vpc_subnet: subnet-987c0def
ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
index ce9869fcd..9008a55ba 100644
--- a/playbooks/aws/openshift-cluster/vars.online.stage.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -3,13 +3,13 @@ ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: m3.large
+ec2_master_instance_type: t2.small
ec2_master_security_groups: [ 'stage', 'stage-master' ]
-ec2_infra_instance_type: m3.large
+ec2_infra_instance_type: c4.large
ec2_infra_security_groups: [ 'stage', 'stage-infra' ]
-ec2_node_instance_type: m3.large
+ec2_node_instance_type: m4.large
ec2_node_security_groups: [ 'stage', 'stage-node' ]
-ec2_etcd_instance_type: m3.large
+ec2_etcd_instance_type: m4.large
ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ]
ec2_vpc_subnet: subnet-987c0def
ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index 07e453f89..95bc4b3e2 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1,14 +1,14 @@
---
deployment_vars:
origin:
- # fedora, since centos requires marketplace
- image: ami-acd999c4
+ # centos-7, requires marketplace
+ image: ami-96a818fe
image_name:
region: us-east-1
- ssh_user: fedora
+ ssh_user: centos
sudo: yes
keypair: libra
- type: m3.large
+ type: m4.large
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
@@ -20,7 +20,7 @@ deployment_vars:
ssh_user: root
sudo: no
keypair: libra
- type: m3.large
+ type: m4.large
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
@@ -32,7 +32,7 @@ deployment_vars:
ssh_user: ec2-user
sudo: yes
keypair: libra
- type: m3.large
+ type: m4.large
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
new file mode 100644
index 000000000..f564905ea
--- /dev/null
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -0,0 +1,12 @@
+---
+- hosts: all
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - role: rhel_subscribe
+ when: deployment_type == "enterprise" and
+ ansible_distribution == "RedHat" and
+ lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+ default('no', True) | lower in ['no', 'false']
+ - openshift_repos
+ - os_update_latest
diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml
new file mode 100644
index 000000000..76246e7b0
--- /dev/null
+++ b/playbooks/byo/vagrant.yml
@@ -0,0 +1,4 @@
+---
+- include: rhel_subscribe.yml
+
+- include: config.yml
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 0779cfe47..4c74f96db 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -39,6 +39,15 @@
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_nodes_group] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+ when: g_nodeonmaster is defined and g_nodeonmaster == true
+
- name: Evaluate oo_first_etcd
add_host:
name: "{{ groups[g_etcd_group][0] }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 6ef375bbb..705f7f223 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -131,6 +131,7 @@
| oo_collect('openshift.common.hostname') }}"
openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([]))
| oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}"
+ openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
pre_tasks:
- set_fact:
openshift_scheduleable_nodes: "{{ hostvars
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index 6630fa27d..830f9d216 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -42,7 +42,7 @@
count: "{{ num_infra }}"
- include: tasks/launch_instances.yml
vars:
- instances: "{{ infra_names }}"
+ instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index a15ec749c..d53884e0d 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -16,8 +16,13 @@ parameters:
num_nodes:
type: number
- label: Number of nodes
- description: Number of nodes
+ label: Number of compute nodes
+ description: Number of compute nodes
+
+ num_infra:
+ type: number
+ label: Number of infrastructure nodes
+ description: Number of infrastructure nodes
cidr:
type: string
@@ -55,7 +60,12 @@ parameters:
node_image:
type: string
label: Node image
- description: Name of the image for the node servers
+ description: Name of the image for the compute node servers
+
+ infra_image:
+ type: string
+ label: Infra image
+ description: Name of the image for the infra node servers
master_flavor:
type: string
@@ -65,7 +75,12 @@ parameters:
node_flavor:
type: string
label: Node flavor
- description: Flavor of the node servers
+ description: Flavor of the compute node servers
+
+ infra_flavor:
+ type: string
+ label: Infra flavor
+ description: Flavor of the infra node servers
outputs:
@@ -83,15 +98,27 @@ outputs:
node_names:
description: Name of the nodes
- value: { get_attr: [ nodes, name ] }
+ value: { get_attr: [ compute_nodes, name ] }
node_ips:
description: IPs of the nodes
- value: { get_attr: [ nodes, private_ip ] }
+ value: { get_attr: [ compute_nodes, private_ip ] }
node_floating_ips:
description: Floating IPs of the nodes
- value: { get_attr: [ nodes, floating_ip ] }
+ value: { get_attr: [ compute_nodes, floating_ip ] }
+
+ infra_names:
+ description: Name of the infra nodes
+ value: { get_attr: [ infra_nodes, name ] }
+
+ infra_ips:
+ description: IPs of the infra nodes
+ value: { get_attr: [ infra_nodes, private_ip ] }
+
+ infra_floating_ips:
+ description: Floating IPs of the infra nodes
+ value: { get_attr: [ infra_nodes, floating_ip ] }
resources:
@@ -218,6 +245,29 @@ resources:
remote_mode: remote_group_id
remote_group_id: { get_resource: master-secgrp }
+ infra-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-infra-secgrp
+ params:
+ cluster_id: { get_param: cluster_id }
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift infrastructure cluster nodes
+ params:
+ cluster_id: { get_param: cluster_id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 80
+ port_range_max: 80
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 443
+ port_range_max: 443
+
masters:
type: OS::Heat::ResourceGroup
properties:
@@ -248,7 +298,7 @@ resources:
cluster_id: { get_param: cluster_id }
depends_on: interface
- nodes:
+ compute_nodes:
type: OS::Heat::ResourceGroup
properties:
count: { get_param: num_nodes }
@@ -257,12 +307,14 @@ resources:
properties:
name:
str_replace:
- template: cluster_id-k8s_type-%index%
+ template: cluster_id-k8s_type-sub_host_type-%index%
params:
cluster_id: { get_param: cluster_id }
k8s_type: node
+ sub_host_type: compute
cluster_id: { get_param: cluster_id }
type: node
+ subtype: compute
image: { get_param: node_image }
flavor: { get_param: node_flavor }
key_name: { get_resource: keypair }
@@ -277,3 +329,36 @@ resources:
params:
cluster_id: { get_param: cluster_id }
depends_on: interface
+
+ infra_nodes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: { get_param: num_infra }
+ resource_def:
+ type: heat_stack_server.yaml
+ properties:
+ name:
+ str_replace:
+ template: cluster_id-k8s_type-sub_host_type-%index%
+ params:
+ cluster_id: { get_param: cluster_id }
+ k8s_type: node
+ sub_host_type: infra
+ cluster_id: { get_param: cluster_id }
+ type: node
+ subtype: infra
+ image: { get_param: infra_image }
+ flavor: { get_param: infra_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: node-secgrp }
+ - { get_resource: infra-secgrp }
+ floating_network: { get_param: external_net }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+ depends_on: interface
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
index 55f64211a..9dcab3e60 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
@@ -19,6 +19,12 @@ parameters:
label: Type
description: Type master or node
+ subtype:
+ type: string
+ label: Sub-type
+ description: Sub-type (compute or infra) for nodes, default otherwise
+ default: default
+
key_name:
type: string
label: Key name
@@ -102,11 +108,12 @@ resources:
env: { get_param: cluster_id }
host-type: { get_param: type }
env-host-type:
- str_template:
+ str_replace:
template: cluster_id-openshift-type
params:
cluster_id: { get_param: cluster_id }
type: { get_param: type }
+ sub-host-type: { get_param: subtype }
port:
type: OS::Neutron::Port
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index d41448dc0..d36bdbf26 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -90,7 +90,7 @@
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_node'
+ groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_compute'
with_together:
- parsed_outputs.node_names
- parsed_outputs.node_ips
@@ -115,6 +115,7 @@
with_flattened:
- parsed_outputs.master_floating_ips
- parsed_outputs.node_floating_ips
+ - parsed_outputs.infra_floating_ips
- name: Wait for user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -125,6 +126,7 @@
with_flattened:
- parsed_outputs.master_floating_ips
- parsed_outputs.node_floating_ips
+ - parsed_outputs.infra_floating_ips
- include: update.yml
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 79a91dfde..27bfb7de9 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install etcd
- yum: pkg=etcd state=present
+ yum: pkg=etcd-2.* state=present
- name: Validate permissions on the config dir
file:
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 801be2c97..9ac23b1dd 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -16,8 +16,8 @@ ETCD_NAME=default
{% endif %}
ETCD_DATA_DIR={{ etcd_data_dir }}
#ETCD_SNAPSHOT_COUNTER="10000"
-#ETCD_HEARTBEAT_INTERVAL="100"
-#ETCD_ELECTION_TIMEOUT="1000"
+ETCD_HEARTBEAT_INTERVAL="500"
+ETCD_ELECTION_TIMEOUT="2500"
ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index d828db52a..d592dc306 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -39,6 +39,9 @@
owner: 'td-agent'
mode: 0444
+- name: "Pause before restarting td-agent and openshift-master, depending on the number of nodes."
+ pause: seconds={{ ( num_nodes|int < 3 ) | ternary(15, (num_nodes|int * 5)) }}
+
- name: ensure td-agent is running
service:
name: 'td-agent'
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
index eb4ef26e8..1eb04626f 100644
--- a/roles/openshift_common/README.md
+++ b/roles/openshift_common/README.md
@@ -1,7 +1,7 @@
-OpenShift Common
-================
+OpenShift/Atomic Enterprise Common
+===================================
-OpenShift common installation and configuration tasks.
+OpenShift/Atomic Enterprise common installation and configuration tasks.
Requirements
------------
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json
index c30832c78..b64acae8b 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json
@@ -315,7 +315,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json
index cbdc69562..20b234bd0 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json
@@ -310,7 +310,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json
index 76548a7fa..146bfb1ee 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json
@@ -187,7 +187,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
index e13224a60..0497e6824 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
@@ -254,7 +254,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json
index e80a909a7..289ab284f 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json
@@ -334,7 +334,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json
index 1bf918eb1..22b301aa9 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json
@@ -329,7 +329,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json
index 505b89043..648a53199 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json
@@ -336,7 +336,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json
index 5d8b75992..83d5c8b18 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json
@@ -331,7 +331,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json
index 95e1be2e3..53b953b7e 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json
@@ -324,7 +324,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json
index d558e5182..9d660cb42 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json
@@ -319,7 +319,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json
index 51a3d1846..d74c2dfe3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json
@@ -157,7 +157,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json
index f81f54e30..b94142135 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json
@@ -224,7 +224,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json
index 11c305441..0c7b7d8e3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json
@@ -305,7 +305,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json
index 99815e87a..892f27fe3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json
@@ -300,7 +300,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json
index 2f1e68206..547449010 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json
@@ -307,7 +307,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json
index 5087d2ff0..2ae59ec71 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json
@@ -302,7 +302,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json
index c28188cc0..b871b48d0 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json
@@ -295,7 +295,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json
index 4f4b2ba44..384ff1b8f 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json
@@ -290,7 +290,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json
index c8d130c37..3c7812b69 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json
@@ -157,7 +157,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json
index 867b5a5da..d725e0606 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json
@@ -224,7 +224,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json
index 4932d6126..cf35d0024 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json
@@ -305,7 +305,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json
index 47977f86b..a993024f4 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json
@@ -300,7 +300,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json
index a7c8bdb7f..0692817bf 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json
@@ -307,7 +307,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json
index 83af5a158..226a983b7 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json
@@ -302,7 +302,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json
index 914ccc47c..b4644ac08 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json
@@ -295,7 +295,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json
index ed95798e4..b46f23225 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json
@@ -290,7 +290,7 @@
},
"triggers": [
{
- "type": "Github",
+ "type": "GitHub",
"github": {
"secret": "${GITHUB_TRIGGER_SECRET}"
}
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index d733639c3..4e0989c5f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -300,8 +300,7 @@ def set_registry_url_if_unset(facts):
if deployment_type == 'enterprise':
registry_url = "openshift3/ose-${component}:${version}"
elif deployment_type == 'online':
- registry_url = ("docker-registry.ops.rhcloud.com/"
- "openshift3/ose-${component}:${version}")
+ registry_url = ("openshift3/ose-${component}:${version}")
facts[role]['registry_url'] = registry_url
return facts
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index d17f3f532..74e702248 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -16,3 +16,10 @@
command: >
{{ openshift.common.admin_binary }} manage-node {{ item }} --schedulable=true
with_items: openshift_scheduleable_nodes
+
+- name: Label nodes
+ command: >
+ {{ openshift.common.client_binary }} label --overwrite node {{ item.openshift.common.hostname }} {{ item.openshift.node.labels | oo_combine_dict }}
+ with_items:
+ - "{{ openshift_node_vars }}"
+ when: "'labels' in item.openshift.node and item.openshift.node.labels != {}"
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index ec7cee33b..9204d25ce 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -53,6 +53,11 @@
oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
+ default_subdomain: "{{ osm_default_subdomain | default(None) }}"
+ custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}"
+ default_node_selector: "{{ osm_default_node_selector | default(None) }}"
+ api_server_args: "{{ osm_api_server_args | default(None) }}"
+ controller_args: "{{ osm_controller_args | default(None) }}"
# TODO: These values need to be configurable
- name: Set dns OpenShift facts
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 5e46f8f69..44567aa22 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -2,6 +2,9 @@ apiLevels:
- v1beta3
- v1
apiVersion: v1
+{% if api_server_args is defined and api_server_args %}
+apiServerArguments: {{ api_server_args }}
+{% endif %}
assetConfig:
logoutURL: ""
masterPublicURL: {{ openshift.master.public_api_url }}
@@ -13,11 +16,16 @@ assetConfig:
keyFile: master.server.key
maxRequestsInFlight: 0
requestTimeoutSeconds: 0
+{% if controller_args is defined and controller_args %}
+controllerArguments: {{ controller_args }}
+{% endif %}
corsAllowedOrigins:
-{# TODO: add support for user specified corsAllowedOrigins #}
{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
- {{ origin }}
{% endfor %}
+{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
+ - {{ custom_origin }}
+{% endfor %}
{% if openshift.master.embedded_dns | bool %}
dnsConfig:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
@@ -93,7 +101,7 @@ policyConfig:
openshiftSharedResourcesNamespace: openshift
{# TODO: Allow users to override projectConfig items #}
projectConfig:
- defaultNodeSelector: ""
+ defaultNodeSelector: "{{ openshift.master.default_node_selector | default("") }}"
projectRequestMessage: ""
projectRequestTemplate: ""
securityAllocator:
@@ -101,7 +109,7 @@ projectConfig:
mcsLabelsPerProject: 5
uidAllocatorRange: 1000000000-1999999999/10000
routingConfig:
- subdomain: router.default.local
+ subdomain: "{{ openshift.master.default_subdomain | default("") }}"
serviceAccountConfig:
managedNames:
- default
diff --git a/roles/openshift_master/templates/v1_partials/oauthConfig.j2 b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
index f6fd88c65..4ca644876 100644
--- a/roles/openshift_master/templates/v1_partials/oauthConfig.j2
+++ b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
@@ -10,6 +10,20 @@
{{ key }}: {{ identity_provider[key] }}"
{% endif %}
{% endfor %}
+{% elif identity_provider.kind == 'LDAPPasswordIdentityProvider' %}
+ attributes:
+{% for attribute_key in identity_provider.attributes %}
+ {{ attribute_key }}:
+{% for attribute_value in identity_provider.attributes[attribute_key] %}
+ - {{ attribute_value }}
+{% endfor %}
+{% endfor %}
+{% for key in ('bindDN', 'bindPassword', 'ca') %}
+ {{ key }}: "{{ identity_provider[key] }}"
+{% endfor %}
+{% for key in ('insecure', 'url') %}
+ {{ key }}: {{ identity_provider[key] }}
+{% endfor %}
{% elif identity_provider.kind == 'RequestHeaderIdentityProvider' %}
headers: {{ identity_provider.headers }}
{% if 'clientCA' in identity_provider %}
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 5edb3b8dd..300e6b495 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -34,6 +34,18 @@ openshift_common
Example Playbook
----------------
+Notes
+-----
+
+Currently we support re-labeling nodes, but we do not re-schedule running pods or remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, follow the steps below:
+
+```
+oadm manage-node --schedulable=false ${NODE}
+oadm manage-node --evacuate ${NODE}
+oadm manage-node --schedulable=true ${NODE}
+```
+
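+A minimal Ansible sketch of the same drain-and-relabel flow (a hypothetical play, not part of this role; it assumes `oadm` is installed on the first master and that the caller supplies `node_to_drain`):
+
+```
+- hosts: masters[0]
+  gather_facts: no
+  tasks:
+    - name: Mark the node unschedulable
+      command: oadm manage-node {{ node_to_drain }} --schedulable=false
+    - name: Evacuate its pods so they get re-scheduled elsewhere
+      command: oadm manage-node {{ node_to_drain }} --evacuate
+    - name: Allow new pods to land on the node again
+      command: oadm manage-node {{ node_to_drain }} --schedulable=true
+```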
+
TODO
License
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e84e74b40..adffca252 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -6,6 +6,9 @@
- fail:
msg: This role requres that osn_cluster_dns_ip is set
when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
+- fail:
+ msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
+ when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online']
- name: Install OpenShift Node package
yum: pkg=openshift-node state=present
@@ -33,6 +36,7 @@
registry_url: "{{ oreg_url | default(none) }}"
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
@@ -63,11 +67,13 @@
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*'
- line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} --selinux-enabled'"
+ line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \
+{% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %}'"
when: docker_check.stat.isreg
- name: Allow NFS access for VMs
seboolean: name=virt_use_nfs state=yes persistent=yes
+ when: ansible_selinux and ansible_selinux.status == "enabled"
- name: Start and enable openshift-node
service: name=openshift-node enabled=yes state=started
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 7778a2a61..e6f75a4c0 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -8,6 +8,9 @@ imageConfig:
format: {{ openshift.node.registry_url }}
latest: false
kind: NodeConfig
+{% if openshift.common.kubelet_args is defined and openshift.common.kubelet_args %}
+kubeletArguments: {{ kubelet_args }}
+{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
nodeName: {{ openshift.common.hostname }}
diff --git a/roles/os_zabbix/library/__init__.py b/roles/os_zabbix/library/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/os_zabbix/library/__init__.py
diff --git a/roles/os_zabbix/library/test.yml b/roles/os_zabbix/library/test.yml
new file mode 100644
index 000000000..f585bcbb2
--- /dev/null
+++ b/roles/os_zabbix/library/test.yml
@@ -0,0 +1,92 @@
+---
+# This is a test playbook to create one of each of the zabbix ansible modules.
+# ensure that the zbxapi module is installed
+# ansible-playbook test.yml
+- name: Test zabbix ansible module
+ hosts: localhost
+ gather_facts: no
+ vars:
+ zbx_server: http://localhost/zabbix/api_jsonrpc.php
+ zbx_user: Admin
+ zbx_password: zabbix
+
+ pre_tasks:
+ - name: Create a template
+ zbx_template:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: 'test template'
+ register: template_output
+
+ - debug: var=template_output
+
+ - name: Create an item
+ zbx_item:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: 'test item'
+ key: 'kenny.item.1'
+ template_name: "{{ template_output.results[0].host }}"
+ register: item_output
+
+ - debug: var=item_output
+
+ - name: Create a trigger
+ zbx_trigger:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ expression: '{test template:kenny.item.1.last()}>2'
+ desc: 'Kenny desc'
+ register: trigger_output
+
+ - debug: var=trigger_output
+
+ - name: Create a hostgroup
+ zbx_hostgroup:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: 'kenny hostgroup'
+ register: hostgroup_output
+
+ - debug: var=hostgroup_output
+
+ - name: Create a host
+ zbx_host:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: 'kenny host'
+ hostgroups:
+ - 'kenny hostgroup'
+ register: host_output
+
+ - debug: var=host_output
+
+ - name: Create a usergroup
+ zbx_usergroup:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ name: kenny usergroup
+ rights:
+ - 'kenny hostgroup': rw
+ register: usergroup_output
+
+ - debug: var=usergroup_output
+
+ - name: Create a user
+ zbx_user:
+ server: "{{ zbx_server }}"
+ user: "{{ zbx_user }}"
+ password: "{{ zbx_password }}"
+ alias: kenny user
+ passwd: zabbix
+ usergroups:
+ - kenny usergroup
+ register: user_output
+
+ - debug: var=user_output
diff --git a/roles/os_zabbix/library/zbx_host.py b/roles/os_zabbix/library/zbx_host.py
new file mode 100644
index 000000000..d75dfdea1
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_host.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+'''
+Zabbix host ansible module
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because the modules look very similar to each other.
+# They need duplicate code because their behavior is very similar
+# but differs for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_group_ids(zapi, hostgroup_names):
+ '''
+ get hostgroups
+ '''
+ # Fetch groups by name
+ group_ids = []
+ for hgr in hostgroup_names:
+ content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}})
+ if content.has_key('result'):
+ group_ids.append({'groupid': content['result'][0]['groupid']})
+
+ return group_ids
+
+def get_template_ids(zapi, template_names):
+ '''
+ get related templates
+ '''
+ template_ids = []
+ # Fetch templates by name
+ for template_name in template_names:
+ content = zapi.get_content('template', 'get', {'search': {'host': template_name}})
+ if content.has_key('result'):
+ template_ids.append({'templateid': content['result'][0]['templateid']})
+ return template_ids
+
+def main():
+ '''
+ Ansible module for zabbix host
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ hostgroup_names=dict(default=[], type='list'),
+ template_names=dict(default=[], type='list'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ interfaces=dict(default=[], type='list'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params.get('user', os.environ['ZABBIX_USER'])
+ passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD'])
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'host'
+ idname = "hostid"
+ hname = module.params['name']
+ state = module.params['state']
+
+ # selectInterfaces doesn't appear to be working but is needed.
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'host': hname},
+ 'selectGroups': 'groupid',
+ 'selectParentTemplates': 'templateid',
+ 'selectInterfaces': 'interfaceid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'host': hname,
+ 'groups': get_group_ids(zapi, module.params['hostgroup_names']),
+ 'templates': get_template_ids(zapi, module.params['template_names']),
+ 'interfaces': module.params.get('interfaces', [{'type': 1, # interface type, 1 = agent
+ 'main': 1, # default interface? 1 = true
+ 'useip': 1, # connect via ip instead of dns? 1 = true
+ 'ip': '127.0.0.1', # ip address for the agent interface
+ 'dns': '', # dns for host
+ 'port': '10050', # port for interface? 10050
+ }])
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'templates' and zab_results.has_key('parentTemplates'):
+ if zab_results['parentTemplates'] != value:
+ differences[key] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/os_zabbix/library/zbx_hostgroup.py b/roles/os_zabbix/library/zbx_hostgroup.py
new file mode 100644
index 000000000..a1eb875d4
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_hostgroup.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+''' Ansible module for hostgroup
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix hostgroup ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because the modules look very similar to each other.
+# They need duplicate code because their behavior is very similar
+# but differs for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def main():
+ ''' ansible module for hostgroup
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params.get('user', os.environ['ZABBIX_USER'])
+ passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD'])
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'hostgroup'
+ idname = "groupid"
+ hname = module.params['name']
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'name': hname},
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'name': hname}
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/os_zabbix/library/zbx_item.py b/roles/os_zabbix/library/zbx_item.py
new file mode 100644
index 000000000..57ec06463
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_item.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix items
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix item ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because the modules look very similar to each other.
+# They need duplicate code because their behavior is very similar
+# but differs for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_value_type(value_type):
+ '''
+ Possible values:
+ 0 - numeric float;
+ 1 - character;
+ 2 - log;
+ 3 - numeric unsigned;
+ 4 - text
+ '''
+ vtype = 0
+ if 'int' in value_type:
+ vtype = 3
+ elif 'char' in value_type:
+ vtype = 1
+ elif 'str' in value_type:
+ vtype = 4
+
+ return vtype
+
+def main():
+ '''
+ ansible zabbix module for zbx_item
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ key=dict(default=None, type='str'),
+ template_name=dict(default=None, type='str'),
+ zabbix_type=dict(default=2, type='int'),
+ value_type=dict(default='int', type='str'),
+ applications=dict(default=[], type='list'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'item'
+ idname = "itemid"
+ state = module.params['state']
+ key = module.params['key']
+ template_name = module.params['template_name']
+
+ content = zapi.get_content('template', 'get', {'search': {'host': template_name}})
+ templateid = None
+ if content['result']:
+ templateid = content['result'][0]['templateid']
+ else:
+ module.exit_json(changed=False,
+ results='Error: Could not find template with name %s for item.' % template_name,
+ state="Unknown")
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'key_': key},
+ 'selectApplications': 'applicationid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'name': module.params['name'] or module.params['key'],
+ 'key_': key,
+ 'hostid': templateid,
+ 'type': module.params['zabbix_type'],
+ 'value_type': get_value_type(module.params['value_type']),
+ 'applications': module.params['applications'],
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
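
The zbx_item module ensures an item exists on a template, creating it when missing and updating only the properties that differ. A minimal task might look like the sketch below; the item key and template name are invented for illustration, the server defaults to the local Zabbix endpoint, and user/password are meant to fall back to the ZABBIX_USER/ZABBIX_PASSWORD environment variables.

    # key and template_name below are illustrative values, not part of this patch
    - name: Ensure a zabbix item is present
      zbx_item:
        name: CPU idle percentage
        key: kernel.all.cpu.idle
        template_name: Template Heartbeat
        value_type: int
        state: present
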
diff --git a/roles/os_zabbix/library/zbx_mediatype.py b/roles/os_zabbix/library/zbx_mediatype.py
new file mode 100644
index 000000000..a49aecd0f
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_mediatype.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+'''
+ Ansible module for mediatype
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix mediatype ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because these modules all look very similar to one another.
+# They intentionally duplicate code because their behavior is nearly identical,
+# differing only in the zabbix class each one manages.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+def get_mtype(mtype):
+ '''
+ Transport used by the media type.
+ Possible values:
+ 0 - email;
+ 1 - script;
+ 2 - SMS;
+ 3 - Jabber;
+ 100 - Ez Texting.
+ '''
+ mtype = mtype.lower()
+ media_type = None
+ if mtype == 'script':
+ media_type = 1
+ elif mtype == 'sms':
+ media_type = 2
+ elif mtype == 'jabber':
+ media_type = 3
+ elif mtype == 'ez texting':
+ media_type = 100
+ else:
+ media_type = 0
+
+ return media_type
+
+def main():
+ '''
+ Ansible zabbix module for mediatype
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ description=dict(default=None, type='str'),
+ mtype=dict(default=None, type='str'),
+ smtp_server=dict(default=None, type='str'),
+ smtp_helo=dict(default=None, type='str'),
+ smtp_email=dict(default=None, type='str'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ # Set the zabbix class name and parameters for the rest of the calls
+ zbx_class_name = 'mediatype'
+ idname = "mediatypeid"
+ description = module.params['description']
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}})
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'description': description,
+ 'type': get_mtype(module.params['mtype']),
+ 'smtp_server': module.params['smtp_server'],
+ 'smtp_helo': module.params['smtp_helo'],
+ 'smtp_email': module.params['smtp_email'],
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if zab_results[key] != value and \
+ zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. This are required
+from ansible.module_utils.basic import *
+
+main()
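
The zbx_mediatype module keys its lookup on the description field and maps the mtype string to the numeric transport the API expects. A sketch of a task creating an email media type, with made-up SMTP values:

    # the SMTP host and addresses are placeholders
    - name: Ensure an email mediatype exists
      zbx_mediatype:
        description: Email
        mtype: email
        smtp_server: mail.example.com
        smtp_helo: example.com
        smtp_email: zabbix@example.com
        state: present
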
diff --git a/roles/os_zabbix/library/zbx_template.py b/roles/os_zabbix/library/zbx_template.py
new file mode 100644
index 000000000..676fa7e49
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_template.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+'''
+Ansible module for template
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix template ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because these modules all look very similar to one another.
+# They intentionally duplicate code because their behavior is nearly identical,
+# differing only in the zabbix class each one manages.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def main():
+ ''' Ansible module for template
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zbc = ZabbixConnection(module.params['server'], user, passwd, module.params['debug'])
+ zapi = ZabbixAPI(zbc)
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'template'
+ idname = 'templateid'
+ tname = module.params['name']
+ state = module.params['state']
+ # get a template, see if it exists
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'host': tname},
+ 'selectParentTemplates': 'templateid',
+ 'selectGroups': 'groupid',
+ #'selectApplications': extend,
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'groups': module.params.get('groups', [{'groupid': '1'}]),
+ 'host': tname,
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if key == 'templates' and zab_results.has_key('parentTemplates'):
+ if zab_results['parentTemplates'] != value:
+ differences[key] = value
+ elif zab_results[key] != str(value) and zab_results[key] != value:
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=content['result'], state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
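
The zbx_template module searches for templates by host name and, when creating one, assigns it to hostgroup id '1' by default. A minimal sketch with an invented template name:

    # the template name is illustrative
    - name: Ensure a template exists
      zbx_template:
        name: Template Heartbeat
        state: present
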
diff --git a/roles/os_zabbix/library/zbx_trigger.py b/roles/os_zabbix/library/zbx_trigger.py
new file mode 100644
index 000000000..7cc9356c8
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_trigger.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+'''
+ansible module for zabbix triggers
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix trigger ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because these modules all look very similar to one another.
+# They intentionally duplicate code because their behavior is nearly identical,
+# differing only in the zabbix class each one manages.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+
+def get_priority(priority):
+ ''' determine priority
+ '''
+ prior = 0
+ if 'info' in priority:
+ prior = 1
+ elif 'warn' in priority:
+ prior = 2
+ elif 'avg' == priority or 'ave' in priority:
+ prior = 3
+ elif 'high' in priority:
+ prior = 4
+ elif 'dis' in priority:
+ prior = 5
+
+ return prior
+
+def get_deps(zapi, deps):
+ ''' get trigger dependencies
+ '''
+ results = []
+ for desc in deps:
+ content = zapi.get_content('trigger',
+ 'get',
+ {'search': {'description': desc},
+ 'expandExpression': True,
+ 'selectDependencies': 'triggerid',
+ })
+ if content.has_key('result') and content['result']:
+ results.append({'triggerid': content['result'][0]['triggerid']})
+
+ return results
+
+def main():
+ '''
+ Create a trigger in zabbix
+
+ Example:
+ "params": {
+ "description": "Processor load is too high on {HOST.NAME}",
+ "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5",
+ "dependencies": [
+ {
+ "triggerid": "14062"
+ }
+ ]
+ },
+
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ expression=dict(default=None, type='str'),
+ description=dict(default=None, type='str'),
+ dependencies=dict(default=[], type='list'),
+ priority=dict(default='avg', type='str'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ # Set the zabbix class name and parameters for the rest of the calls
+ zbx_class_name = 'trigger'
+ idname = "triggerid"
+ state = module.params['state']
+ description = module.params['description']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'description': description},
+ 'expandExpression': True,
+ 'selectDependencies': 'triggerid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'description': description,
+ 'expression': module.params['expression'],
+ 'dependencies': get_deps(zapi, module.params['dependencies']),
+ 'priority': get_priority(module.params['priority']),
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
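
The zbx_trigger module looks triggers up by description and resolves the dependencies list from other triggers' descriptions via get_deps. Reusing the expression from the docstring example above, a task might look like this sketch:

    - name: Ensure a cpu load trigger exists
      zbx_trigger:
        description: "Processor load is too high on {HOST.NAME}"
        expression: "{Linux server:system.cpu.load[percpu,avg1].last()}>5"
        priority: high
        state: present
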
diff --git a/roles/os_zabbix/library/zbx_user.py b/roles/os_zabbix/library/zbx_user.py
new file mode 100644
index 000000000..489023407
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_user.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+'''
+ansible module for zabbix users
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix user ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is in place because these modules all look very similar to one another.
+# They intentionally duplicate code because their behavior is nearly identical,
+# differing only in the zabbix class each one manages.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_usergroups(zapi, usergroups):
+ ''' Get usergroups
+ '''
+ ugroups = []
+ for ugr in usergroups:
+ content = zapi.get_content('usergroup',
+ 'get',
+ {'search': {'name': ugr},
+ #'selectUsers': 'userid',
+ #'getRights': 'extend'
+ })
+ if content['result']:
+ ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']})
+
+ return ugroups
+
+def main():
+ '''
+ ansible zabbix module for users
+ '''
+
+ ##def user(self, name, state='present', params=None):
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ alias=dict(default=None, type='str'),
+ passwd=dict(default=None, type='str'),
+ usergroups=dict(default=None, type='list'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ password = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zbc = ZabbixConnection(module.params['server'], user, password, module.params['debug'])
+ zapi = ZabbixAPI(zbc)
+
+ ## before we can create user media entries (and users with media types), the media types themselves must exist
+ zbx_class_name = 'user'
+ idname = "userid"
+ alias = module.params['alias']
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'output': 'extend',
+ 'search': {'alias': alias},
+ "selectUsrgrps": 'usergrpid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'alias': alias,
+ 'passwd': module.params['passwd'],
+ 'usrgrps': get_usergroups(zapi, module.params['usergroups']),
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if key == 'passwd':
+ differences[key] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. This are required
+from ansible.module_utils.basic import *
+
+main()
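
The zbx_user module keys on alias and resolves usergroups by name through get_usergroups; the passwd parameter is always pushed as a difference (presumably because the API never returns the stored password), so supplying it makes the task report changed. A sketch with invented values:

    # alias, password variable, and group name are placeholders
    - name: Ensure a zabbix user exists
      zbx_user:
        alias: jdoe
        passwd: "{{ zbx_user_password }}"
        usergroups:
        - Zabbix administrators
        state: present
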
diff --git a/roles/os_zabbix/library/zbx_usergroup.py b/roles/os_zabbix/library/zbx_usergroup.py
new file mode 100644
index 000000000..ede4c9df1
--- /dev/null
+++ b/roles/os_zabbix/library/zbx_usergroup.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+'''
+zabbix ansible module for usergroups
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix usergroup ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because these modules all look very similar to one another.
+# They intentionally duplicate code because their behavior is nearly identical,
+# differing only in the zabbix class each one manages.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_rights(zapi, rights):
+ '''Get rights
+ '''
+ perms = []
+ for right in rights:
+ hstgrp = right.keys()[0]
+ perm = right.values()[0]
+ content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}})
+ if content['result']:
+ permission = 0
+ if perm == 'ro':
+ permission = 2
+ elif perm == 'rw':
+ permission = 3
+ perms.append({'id': content['result'][0]['groupid'],
+ 'permission': permission})
+ return perms
+
+def get_userids(zapi, users):
+ ''' Get userids from user aliases
+ '''
+ userids = []
+ for alias in users:
+ content = zapi.get_content('user', 'get', {'search': {'alias': alias}})
+ if content['result']:
+ userids.append(content['result'][0]['userid'])
+
+ return userids
+
+def main():
+ ''' Ansible module for usergroup
+ '''
+
+ ##def usergroup(self, name, rights=None, users=None, state='present', params=None):
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ rights=dict(default=[], type='list'),
+ users=dict(default=[], type='list'),
+ debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params['user'] or os.environ['ZABBIX_USER']
+ passwd = module.params['password'] or os.environ['ZABBIX_PASSWORD']
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+
+ zbx_class_name = 'usergroup'
+ idname = "usrgrpid"
+ uname = module.params['name']
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'search': {'name': uname},
+ 'selectUsers': 'userid',
+ })
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ params = {'name': uname,
+ 'rights': get_rights(zapi, module.params['rights']),
+ 'userids': get_userids(zapi, module.params['users']),
+ }
+
+ if not exists(content):
+ # if we didn't find it, create it
+ content = zapi.get_content(zbx_class_name, 'create', params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+ # already exists, we need to update it
+ # let's compare properties
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+ if key == 'rights':
+ differences['rights'] = value
+
+ elif key == 'userids' and zab_results.has_key('users'):
+ if zab_results['users'] != value:
+ differences['userids'] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences[idname] = zab_results[idname]
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
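
The zbx_usergroup module expects rights as a list of single-key mappings from hostgroup name to 'ro' or 'rw', and users as a list of aliases; get_rights and get_userids resolve both to ids before the create or update call. A sketch with invented names:

    # the group, hostgroup, and user names are placeholders
    - name: Ensure a usergroup exists
      zbx_usergroup:
        name: Zabbix administrators
        rights:
        - Templates: rw
        users:
        - jdoe
        state: present
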
diff --git a/roles/os_zabbix/library/zbxapi.py b/roles/os_zabbix/library/zbxapi.py
deleted file mode 100755
index 48f294938..000000000
--- a/roles/os_zabbix/library/zbxapi.py
+++ /dev/null
@@ -1,382 +0,0 @@
-#!/usr/bin/env python
-# vim: expandtab:tabstop=4:shiftwidth=4
-'''
- ZabbixAPI ansible module
-'''
-
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Purpose: An ansible module to communicate with zabbix.
-#
-
-# pylint: disable=line-too-long
-# Disabling line length for readability
-
-import json
-import httplib2
-import sys
-import os
-import re
-import copy
-
-class ZabbixAPIError(Exception):
- '''
- ZabbixAPIError
- Exists to propagate errors up from the api
- '''
- pass
-
-class ZabbixAPI(object):
- '''
- ZabbixAPI class
- '''
- classes = {
- 'Action': ['create', 'delete', 'get', 'update'],
- 'Alert': ['get'],
- 'Application': ['create', 'delete', 'get', 'massadd', 'update'],
- 'Configuration': ['export', 'import'],
- 'Dcheck': ['get'],
- 'Dhost': ['get'],
- 'Drule': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Dservice': ['get'],
- 'Event': ['acknowledge', 'get'],
- 'Graph': ['create', 'delete', 'get', 'update'],
- 'Graphitem': ['get'],
- 'Graphprototype': ['create', 'delete', 'get', 'update'],
- 'History': ['get'],
- 'Hostgroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
- 'Hostinterface': ['create', 'delete', 'get', 'massadd', 'massremove', 'replacehostinterfaces', 'update'],
- 'Host': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
- 'Hostprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Httptest': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Iconmap': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Image': ['create', 'delete', 'get', 'update'],
- 'Item': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Itemprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Maintenance': ['create', 'delete', 'get', 'update'],
- 'Map': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Mediatype': ['create', 'delete', 'get', 'update'],
- 'Proxy': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Screen': ['create', 'delete', 'get', 'update'],
- 'Screenitem': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update', 'updatebyposition'],
- 'Script': ['create', 'delete', 'execute', 'get', 'getscriptsbyhosts', 'update'],
- 'Service': ['adddependencies', 'addtimes', 'create', 'delete', 'deletedependencies', 'deletetimes', 'get', 'getsla', 'isreadable', 'iswritable', 'update'],
- 'Template': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
- 'Templatescreen': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
- 'Templatescreenitem': ['get'],
- 'Trigger': ['adddependencies', 'create', 'delete', 'deletedependencies', 'get', 'isreadable', 'iswritable', 'update'],
- 'Triggerprototype': ['create', 'delete', 'get', 'update'],
- 'User': ['addmedia', 'create', 'delete', 'deletemedia', 'get', 'isreadable', 'iswritable', 'login', 'logout', 'update', 'updatemedia', 'updateprofile'],
- 'Usergroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massupdate', 'update'],
- 'Usermacro': ['create', 'createglobal', 'delete', 'deleteglobal', 'get', 'update', 'updateglobal'],
- 'Usermedia': ['get'],
- }
-
- def __init__(self, data=None):
- if not data:
- data = {}
- self.server = data.get('server', None)
- self.username = data.get('user', None)
- self.password = data.get('password', None)
- if any([value == None for value in [self.server, self.username, self.password]]):
- print 'Please specify zabbix server url, username, and password.'
- sys.exit(1)
-
- self.verbose = data.get('verbose', False)
- self.use_ssl = data.has_key('use_ssl')
- self.auth = None
-
- for cname, _ in self.classes.items():
- setattr(self, cname.lower(), getattr(self, cname)(self))
-
- # pylint: disable=no-member
- # This method does not exist until the metaprogramming executed
- results = self.user.login(user=self.username, password=self.password)
-
- if results[0]['status'] == '200':
- if results[1].has_key('result'):
- self.auth = results[1]['result']
- elif results[1].has_key('error'):
- print "Unable to authenticate with zabbix server. {0} ".format(results[1]['error'])
- sys.exit(1)
- else:
- print "Error in call to zabbix. Http status: {0}.".format(results[0]['status'])
- sys.exit(1)
-
- def perform(self, method, rpc_params):
- '''
- This method calls your zabbix server.
-
- It requires the following parameters in order for a proper request to be processed:
- jsonrpc - the version of the JSON-RPC protocol used by the API;
- the Zabbix API implements JSON-RPC version 2.0;
- method - the API method being called;
- rpc_params - parameters that will be passed to the API method;
- id - an arbitrary identifier of the request;
- auth - a user authentication token; since we don't have one yet, it's set to null.
- '''
- http_method = "POST"
- jsonrpc = "2.0"
- rid = 1
-
- http = None
- if self.use_ssl:
- http = httplib2.Http()
- else:
- http = httplib2.Http(disable_ssl_certificate_validation=True,)
-
- headers = {}
- headers["Content-type"] = "application/json"
-
- body = {
- "jsonrpc": jsonrpc,
- "method": method,
- "params": rpc_params.get('params', {}),
- "id": rid,
- 'auth': self.auth,
- }
-
- if method in ['user.login', 'api.version']:
- del body['auth']
-
- body = json.dumps(body)
-
- if self.verbose:
- print body
- print method
- print headers
- httplib2.debuglevel = 1
-
- response, content = http.request(self.server, http_method, body, headers)
-
- if response['status'] not in ['200', '201']:
- raise ZabbixAPIError('Error calling zabbix. Zabbix returned %s' % response['status'])
-
- if self.verbose:
- print response
- print content
-
- try:
- content = json.loads(content)
- except ValueError as err:
- content = {"error": err.message}
-
- return response, content
-
- @staticmethod
- def meta(cname, method_names):
- '''
- This bit of metaprogramming is where the ZabbixAPI subclasses are created.
- For each of ZabbixAPI.classes we create a class from the key and methods
- from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class
- to each subclass in order for each to be able to call the perform method.
- '''
- def meta_method(_class, method_name):
- '''
- This meta method allows a class to add methods to it.
- '''
- # This template method is a stub method for each of the subclass
- # methods.
- def template_method(self, params=None, **rpc_params):
- '''
- This template method is a stub method for each of the subclass methods.
- '''
- if params:
- rpc_params['params'] = params
- else:
- rpc_params['params'] = copy.deepcopy(rpc_params)
-
- return self.parent.perform(cname.lower()+"."+method_name, rpc_params)
-
- template_method.__doc__ = \
- "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % \
- (cname.lower(), method_name)
- template_method.__name__ = method_name
- # this is where the template method is placed inside of the subclass
- # e.g. setattr(User, "create", stub_method)
- setattr(_class, template_method.__name__, template_method)
-
- # This class call instantiates a subclass. e.g. User
- _class = type(cname,
- (object,),
- {'__doc__': \
- "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % cname.lower()})
- def __init__(self, parent):
- '''
- This init method gets placed inside of the _class
- to allow it to be instantiated. A reference to the parent class(ZabbixAPI)
- is passed in to allow each class access to the perform method.
- '''
- self.parent = parent
-
- # This attaches the init to the subclass. e.g. Create
- setattr(_class, __init__.__name__, __init__)
- # For each of our ZabbixAPI.classes dict values
- # Create a method and attach it to our subclass.
- # e.g. 'User': ['delete', 'get', 'updatemedia', 'updateprofile',
- # 'update', 'iswritable', 'logout', 'addmedia', 'create',
- # 'login', 'deletemedia', 'isreadable'],
- # User.delete
- # User.get
- for method_name in method_names:
- meta_method(_class, method_name)
- # Return our subclass with all methods attached
- return _class
-
-# Attach all ZabbixAPI.classes to ZabbixAPI class through metaprogramming
-for _class_name, _method_names in ZabbixAPI.classes.items():
- setattr(ZabbixAPI, _class_name, ZabbixAPI.meta(_class_name, _method_names))
-
-def exists(content, key='result'):
- ''' Check if key exists in content or the size of content[key] > 0
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def diff_content(from_zabbix, from_user, ignore=None):
- ''' Compare passed in object to results returned from zabbix
- '''
- terms = ['search', 'output', 'groups', 'select', 'expand', 'filter']
- if ignore:
- terms.extend(ignore)
- regex = '(' + '|'.join(terms) + ')'
- retval = {}
- for key, value in from_user.items():
- if re.findall(regex, key):
- continue
-
- # special case here for templates. You query templates and
- # the zabbix api returns parentTemplates. These will obviously fail.
- # So when its templates compare against parentTemplates.
- if key == 'templates' and from_zabbix.has_key('parentTemplates'):
- if from_zabbix['parentTemplates'] != value:
- retval[key] = value
-
- elif from_zabbix[key] != str(value):
- retval[key] = str(value)
-
- return retval
-
-def main():
- '''
- This main method runs the ZabbixAPI Ansible Module
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- user=dict(default=None, type='str'),
- password=dict(default=None, type='str'),
- zbx_class=dict(choices=ZabbixAPI.classes.keys()),
- params=dict(),
- debug=dict(default=False, type='bool'),
- state=dict(default='present', type='str'),
- ignore=dict(default=None, type='list'),
- ),
- #supports_check_mode=True
- )
-
- user = module.params.get('user', None)
- if not user:
- user = os.environ['ZABBIX_USER']
-
- passwd = module.params.get('password', None)
- if not passwd:
- passwd = os.environ['ZABBIX_PASSWORD']
-
-
-
- api_data = {
- 'user': user,
- 'password': passwd,
- 'server': module.params['server'],
- 'verbose': module.params['debug']
- }
-
- if not user or not passwd or not module.params['server']:
- module.fail_json(msg='Please specify the user, password, and the zabbix server.')
-
- zapi = ZabbixAPI(api_data)
-
- ignore = module.params['ignore']
- zbx_class = module.params.get('zbx_class')
- rpc_params = module.params.get('params', {})
- state = module.params.get('state')
-
-
- # Get the instance we are trying to call
- zbx_class_inst = zapi.__getattribute__(zbx_class.lower())
-
- # perform get
- # Get the instance's method we are trying to call
-
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['get']
- _, content = zbx_action_method(zbx_class_inst, rpc_params)
-
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
- # If we are coming from a query, we need to pass in the correct rpc_params for delete.
- # specifically the zabbix class name + 'id'
- # if rpc_params is a list then we need to pass it. (list of ids to delete)
- idname = zbx_class.lower() + "id"
- if not isinstance(rpc_params, list) and content['result'][0].has_key(idname):
- rpc_params = [content['result'][0][idname]]
-
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['delete']
- _, content = zbx_action_method(zbx_class_inst, rpc_params)
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- if state == 'present':
- # It's not there, create it!
- if not exists(content):
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['create']
- _, content = zbx_action_method(zbx_class_inst, rpc_params)
- module.exit_json(changed=True, results=content['result'], state='present')
-
- # It's there and the same, do nothing!
- diff_params = diff_content(content['result'][0], rpc_params, ignore)
- if not diff_params:
- module.exit_json(changed=False, results=content['result'], state="present")
-
- # Add the id to update with
- idname = zbx_class.lower() + "id"
- diff_params[idname] = content['result'][0][idname]
-
-
- ## It's there and not the same, update it!
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['update']
- _, content = zbx_action_method(zbx_class_inst, diff_params)
- module.exit_json(changed=True, results=content, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. This are required
-from ansible.module_utils.basic import *
-
-main()
-
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index fc4d44745..e9e6e4bd4 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -1,5 +1,9 @@
---
+- name: Disable all repositories
+ command: subscription-manager repos --disable="*"
+
- name: Enable RHEL repositories
command: subscription-manager repos \
--enable="rhel-7-server-rpms" \
+ --enable="rhel-7-server-extras-rpms" \
--enable="rhel-7-server-ose-3.0-rpms"