summaryrefslogtreecommitdiffstats
path: root/roles
diff options
context:
space:
mode:
authorTroy Dawson <tdawson@redhat.com>2015-06-12 12:49:37 -0500
committerTroy Dawson <tdawson@redhat.com>2015-06-12 12:49:37 -0500
commitc650920bc7b0043e59fa3439f48f61d5fa211f2d (patch)
tree3e1f882f5bc7fe419f13a134a71927cb6484fa86 /roles
parent124ca40c134a40b2e6823ab3c4bfe329580d7eaa (diff)
parent42806b6745c747843b71eaf08b62aeee5e450ab1 (diff)
downloadopenshift-c650920bc7b0043e59fa3439f48f61d5fa211f2d.tar.gz
openshift-c650920bc7b0043e59fa3439f48f61d5fa211f2d.tar.bz2
openshift-c650920bc7b0043e59fa3439f48f61d5fa211f2d.tar.xz
openshift-c650920bc7b0043e59fa3439f48f61d5fa211f2d.zip
Merge branch 'master' into prod
Diffstat (limited to 'roles')
-rw-r--r--roles/atomic_base/README.md56
-rw-r--r--roles/atomic_base/files/bash/bashrc12
-rw-r--r--roles/atomic_base/files/ostree/repo_config10
-rw-r--r--roles/atomic_base/files/system/90-nofile.conf7
-rw-r--r--roles/atomic_base/meta/main.yml19
-rw-r--r--roles/atomic_base/tasks/bash.yml14
-rw-r--r--roles/atomic_base/tasks/cloud_user.yml6
-rw-r--r--roles/atomic_base/tasks/main.yml4
-rw-r--r--roles/atomic_base/tasks/ostree.yml18
-rw-r--r--roles/atomic_base/tasks/system.yml3
-rw-r--r--roles/atomic_base/vars/main.yml2
-rw-r--r--roles/atomic_proxy/README.md56
-rw-r--r--roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json29
-rw-r--r--roles/atomic_proxy/files/puppet/auth.conf116
-rwxr-xr-xroles/atomic_proxy/files/setup-proxy-containers.sh43
-rw-r--r--roles/atomic_proxy/handlers/main.yml3
-rw-r--r--roles/atomic_proxy/meta/main.yml21
-rw-r--r--roles/atomic_proxy/tasks/main.yml3
-rw-r--r--roles/atomic_proxy/tasks/setup_containers.yml57
-rw-r--r--roles/atomic_proxy/tasks/setup_puppet.yml24
-rw-r--r--roles/atomic_proxy/templates/puppet/puppet.conf.j240
-rwxr-xr-xroles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j216
-rw-r--r--roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j232
-rw-r--r--roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j236
-rw-r--r--roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j233
-rw-r--r--roles/atomic_proxy/vars/main.yml2
-rwxr-xr-xroles/docker/files/enter-container.sh13
-rw-r--r--roles/docker/handlers/main.yml4
-rw-r--r--roles/docker/tasks/main.yml9
-rw-r--r--roles/docker_storage/README.md39
-rw-r--r--roles/docker_storage/defaults/main.yml1
-rw-r--r--roles/docker_storage/handlers/main.yml1
-rw-r--r--roles/docker_storage/meta/main.yml9
-rw-r--r--roles/docker_storage/tasks/main.yml37
-rw-r--r--roles/docker_storage/vars/main.yml1
-rw-r--r--roles/etcd/README.md38
-rw-r--r--roles/etcd/handlers/main.yml3
-rw-r--r--roles/etcd/meta/main.yml124
-rw-r--r--roles/etcd/tasks/main.yml20
-rw-r--r--roles/etcd/templates/etcd.conf.j234
-rw-r--r--roles/fluentd_master/tasks/main.yml47
-rw-r--r--roles/fluentd_master/templates/kubernetes.conf.j29
-rw-r--r--roles/fluentd_node/tasks/main.yml55
-rw-r--r--roles/fluentd_node/templates/kubernetes.conf.j253
-rw-r--r--roles/fluentd_node/templates/td-agent.j22
-rw-r--r--roles/kube_nfs_volumes/README.md111
-rw-r--r--roles/kube_nfs_volumes/defaults/main.yml10
-rw-r--r--roles/kube_nfs_volumes/handlers/main.yml3
-rw-r--r--roles/kube_nfs_volumes/library/partitionpool.py240
-rw-r--r--roles/kube_nfs_volumes/meta/main.yml16
-rw-r--r--roles/kube_nfs_volumes/tasks/main.yml25
-rw-r--r--roles/kube_nfs_volumes/tasks/nfs.yml16
-rw-r--r--roles/kube_nfs_volumes/templates/nfs.json.j223
-rw-r--r--roles/openshift_common/tasks/main.yml2
-rw-r--r--roles/openshift_common/vars/main.yml4
-rwxr-xr-xroles/openshift_facts/library/openshift_facts.py208
-rw-r--r--roles/openshift_master/README.md2
-rw-r--r--roles/openshift_master/defaults/main.yml4
-rw-r--r--roles/openshift_master/tasks/main.yml124
-rw-r--r--roles/openshift_master/templates/master.yaml.v1.j298
-rw-r--r--roles/openshift_master/templates/scheduler.json.j212
-rw-r--r--roles/openshift_master/templates/v1_partials/oauthConfig.j278
-rw-r--r--roles/openshift_master/vars/main.yml13
-rw-r--r--roles/openshift_node/README.md2
-rw-r--r--roles/openshift_node/defaults/main.yml4
-rw-r--r--roles/openshift_node/handlers/main.yml1
-rw-r--r--roles/openshift_node/tasks/main.yml71
-rw-r--r--roles/openshift_node/templates/node.yaml.v1.j218
-rw-r--r--roles/openshift_node/vars/main.yml3
-rw-r--r--roles/openshift_register_nodes/defaults/main.yml2
-rwxr-xr-xroles/openshift_register_nodes/library/kubernetes_register_node.py228
-rw-r--r--roles/openshift_register_nodes/tasks/main.yml64
-rw-r--r--roles/openshift_register_nodes/vars/main.yml13
-rw-r--r--roles/openshift_registry/README.md42
-rw-r--r--roles/openshift_registry/handlers/main.yml0
-rw-r--r--roles/openshift_registry/meta/main.yml (renamed from roles/openshift_sdn_node/meta/main.yml)6
-rw-r--r--roles/openshift_registry/tasks/main.yml11
-rw-r--r--roles/openshift_registry/vars/main.yml3
-rw-r--r--roles/openshift_repos/files/online/repos/enterprise-v3.repo8
-rw-r--r--roles/openshift_router/README.md (renamed from roles/openshift_sdn_master/README.md)18
-rw-r--r--roles/openshift_router/handlers/main.yml0
-rw-r--r--roles/openshift_router/meta/main.yml (renamed from roles/openshift_sdn_master/meta/main.yml)6
-rw-r--r--roles/openshift_router/tasks/main.yml11
-rw-r--r--roles/openshift_router/vars/main.yml3
-rw-r--r--roles/openshift_sdn_master/handlers/main.yml3
-rw-r--r--roles/openshift_sdn_master/tasks/main.yml37
-rw-r--r--roles/openshift_sdn_node/README.md44
-rw-r--r--roles/openshift_sdn_node/handlers/main.yml3
-rw-r--r--roles/openshift_sdn_node/tasks/main.yml60
-rwxr-xr-xroles/os_zabbix/library/zbxapi.py259
-rw-r--r--roles/pods/meta/main.yml6
91 files changed, 1591 insertions, 1485 deletions
diff --git a/roles/atomic_base/README.md b/roles/atomic_base/README.md
deleted file mode 100644
index 8fe3faf7d..000000000
--- a/roles/atomic_base/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-Role Name
-========
-
-The purpose of this role is to do common configurations for all RHEL atomic hosts.
-
-
-Requirements
-------------
-
-None
-
-
-Role Variables
---------------
-
-None
-
-
-Dependencies
-------------
-
-None
-
-
-Example Playbook
--------------------------
-
-From a group playbook:
-
- hosts: servers
- roles:
- - ../../roles/atomic_base
-
-
-License
--------
-
-Copyright 2012-2014 Red Hat, Inc., All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-Author Information
-------------------
-
-Thomas Wiest <twiest@redhat.com>
diff --git a/roles/atomic_base/files/bash/bashrc b/roles/atomic_base/files/bash/bashrc
deleted file mode 100644
index 446f18f22..000000000
--- a/roles/atomic_base/files/bash/bashrc
+++ /dev/null
@@ -1,12 +0,0 @@
-# .bashrc
-
-# User specific aliases and functions
-
-alias rm='rm -i'
-alias cp='cp -i'
-alias mv='mv -i'
-
-# Source global definitions
-if [ -f /etc/bashrc ]; then
- . /etc/bashrc
-fi
diff --git a/roles/atomic_base/files/ostree/repo_config b/roles/atomic_base/files/ostree/repo_config
deleted file mode 100644
index 7038158f9..000000000
--- a/roles/atomic_base/files/ostree/repo_config
+++ /dev/null
@@ -1,10 +0,0 @@
-[core]
-repo_version=1
-mode=bare
-
-[remote "rh-atomic-controller"]
-url=https://mirror.openshift.com/libra/ostree/rhel-7-atomic-host
-branches=rh-atomic-controller/el7/x86_64/buildmaster/controller/docker;
-tls-client-cert-path=/var/lib/yum/client-cert.pem
-tls-client-key-path=/var/lib/yum/client-key.pem
-gpg-verify=false
diff --git a/roles/atomic_base/files/system/90-nofile.conf b/roles/atomic_base/files/system/90-nofile.conf
deleted file mode 100644
index 8537a4c5f..000000000
--- a/roles/atomic_base/files/system/90-nofile.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-# PAM process file descriptor limits
-# see limits.conf(5) for details.
-#Each line describes a limit for a user in the form:
-#
-#<domain> <type> <item> <value>
-* hard nofile 16384
-root soft nofile 16384
diff --git a/roles/atomic_base/meta/main.yml b/roles/atomic_base/meta/main.yml
deleted file mode 100644
index 9578ab809..000000000
--- a/roles/atomic_base/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-galaxy_info:
- author: Thomas Wiest
- description: Common base RHEL atomic configurations
- company: Red Hat
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: Apache
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies: []
diff --git a/roles/atomic_base/tasks/bash.yml b/roles/atomic_base/tasks/bash.yml
deleted file mode 100644
index 547ae83c3..000000000
--- a/roles/atomic_base/tasks/bash.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Copy .bashrc
- copy: src=bash/bashrc dest=/root/.bashrc owner=root group=root mode=0644
-
-- name: Link to .profile to .bashrc
- file: src=/root/.bashrc dest=/root/.profile owner=root group=root state=link
-
-- name: "Setup Timezone [{{ oo_timezone }}]"
- file:
- src: "/usr/share/zoneinfo/{{ oo_timezone }}"
- dest: /etc/localtime
- owner: root
- group: root
- state: link
diff --git a/roles/atomic_base/tasks/cloud_user.yml b/roles/atomic_base/tasks/cloud_user.yml
deleted file mode 100644
index e7347fc3d..000000000
--- a/roles/atomic_base/tasks/cloud_user.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Remove cloud-user account
- user: name=cloud-user state=absent remove=yes force=yes
-
-- name: Remove cloud-user sudo
- file: path=/etc/sudoers.d/90-cloud-init-users state=absent
diff --git a/roles/atomic_base/tasks/main.yml b/roles/atomic_base/tasks/main.yml
deleted file mode 100644
index 5d8e8571a..000000000
--- a/roles/atomic_base/tasks/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: system.yml
-- include: bash.yml
-- include: ostree.yml
diff --git a/roles/atomic_base/tasks/ostree.yml b/roles/atomic_base/tasks/ostree.yml
deleted file mode 100644
index aacaa5efd..000000000
--- a/roles/atomic_base/tasks/ostree.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Copy ostree repo config
- copy:
- src: ostree/repo_config
- dest: /ostree/repo/config
- owner: root
- group: root
- mode: 0644
-
-- name: "WORK AROUND: Stat redhat repo file"
- stat: path=/etc/yum.repos.d/redhat.repo
- register: redhat_repo
-
-- name: "WORK AROUND: subscription manager failures"
- file:
- path: /etc/yum.repos.d/redhat.repo
- state: touch
- when: redhat_repo.stat.exists == False
diff --git a/roles/atomic_base/tasks/system.yml b/roles/atomic_base/tasks/system.yml
deleted file mode 100644
index e5cde427d..000000000
--- a/roles/atomic_base/tasks/system.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: Upload nofile limits.d file
- copy: src=system/90-nofile.conf dest=/etc/security/limits.d/90-nofile.conf owner=root group=root mode=0644
diff --git a/roles/atomic_base/vars/main.yml b/roles/atomic_base/vars/main.yml
deleted file mode 100644
index d4e61175c..000000000
--- a/roles/atomic_base/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-oo_timezone: US/Eastern
diff --git a/roles/atomic_proxy/README.md b/roles/atomic_proxy/README.md
deleted file mode 100644
index 348eaee1f..000000000
--- a/roles/atomic_proxy/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-Role Name
-========
-
-The purpose of this role is to do common configurations for all RHEL atomic hosts.
-
-
-Requirements
-------------
-
-None
-
-
-Role Variables
---------------
-
-None
-
-
-Dependencies
-------------
-
-None
-
-
-Example Playbook
--------------------------
-
-From a group playbook:
-
- hosts: servers
- roles:
- - ../../roles/atomic_proxy
-
-
-License
--------
-
-Copyright 2012-2014 Red Hat, Inc., All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-Author Information
-------------------
-
-Thomas Wiest <twiest@redhat.com>
diff --git a/roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json b/roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json
deleted file mode 100644
index c15835d48..000000000
--- a/roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "Containers":[
- {
- "Name":"proxy-puppet",
- "Count":1,
- "Image":"puppet:latest",
- "PublicPorts":[
- ]
- },
- {
- "Name":"proxy",
- "Count":1,
- "Image":"proxy:latest",
- "PublicPorts":[
- {"Internal":80,"External":80},
- {"Internal":443,"External":443},
- {"Internal":4999,"External":4999}
- ]
- },
- {
- "Name":"proxy-monitoring",
- "Count":1,
- "Image":"monitoring:latest",
- "PublicPorts":[
- ]
- }
- ],
- "RandomizeIds": false
-}
diff --git a/roles/atomic_proxy/files/puppet/auth.conf b/roles/atomic_proxy/files/puppet/auth.conf
deleted file mode 100644
index b31906bae..000000000
--- a/roles/atomic_proxy/files/puppet/auth.conf
+++ /dev/null
@@ -1,116 +0,0 @@
-# This is the default auth.conf file, which implements the default rules
-# used by the puppet master. (That is, the rules below will still apply
-# even if this file is deleted.)
-#
-# The ACLs are evaluated in top-down order. More specific stanzas should
-# be towards the top of the file and more general ones at the bottom;
-# otherwise, the general rules may "steal" requests that should be
-# governed by the specific rules.
-#
-# See http://docs.puppetlabs.com/guides/rest_auth_conf.html for a more complete
-# description of auth.conf's behavior.
-#
-# Supported syntax:
-# Each stanza in auth.conf starts with a path to match, followed
-# by optional modifiers, and finally, a series of allow or deny
-# directives.
-#
-# Example Stanza
-# ---------------------------------
-# path /path/to/resource # simple prefix match
-# # path ~ regex # alternately, regex match
-# [environment envlist]
-# [method methodlist]
-# [auth[enthicated] {yes|no|on|off|any}]
-# allow [host|backreference|*|regex]
-# deny [host|backreference|*|regex]
-# allow_ip [ip|cidr|ip_wildcard|*]
-# deny_ip [ip|cidr|ip_wildcard|*]
-#
-# The path match can either be a simple prefix match or a regular
-# expression. `path /file` would match both `/file_metadata` and
-# `/file_content`. Regex matches allow the use of backreferences
-# in the allow/deny directives.
-#
-# The regex syntax is the same as for Ruby regex, and captures backreferences
-# for use in the `allow` and `deny` lines of that stanza
-#
-# Examples:
-#
-# path ~ ^/path/to/resource # Equivalent to `path /path/to/resource`.
-# allow * # Allow all authenticated nodes (since auth
-# # defaults to `yes`).
-#
-# path ~ ^/catalog/([^/]+)$ # Permit nodes to access their own catalog (by
-# allow $1 # certname), but not any other node's catalog.
-#
-# path ~ ^/file_(metadata|content)/extra_files/ # Only allow certain nodes to
-# auth yes # access the "extra_files"
-# allow /^(.+)\.example\.com$/ # mount point; note this must
-# allow_ip 192.168.100.0/24 # go ABOVE the "/file" rule,
-# # since it is more specific.
-#
-# environment:: restrict an ACL to a comma-separated list of environments
-# method:: restrict an ACL to a comma-separated list of HTTP methods
-# auth:: restrict an ACL to an authenticated or unauthenticated request
-# the default when unspecified is to restrict the ACL to authenticated requests
-# (ie exactly as if auth yes was present).
-#
-
-### Authenticated ACLs - these rules apply only when the client
-### has a valid certificate and is thus authenticated
-
-# allow nodes to retrieve their own catalog
-path ~ ^/catalog/([^/]+)$
-method find
-allow $1
-
-# allow nodes to retrieve their own node definition
-path ~ ^/node/([^/]+)$
-method find
-allow $1
-
-# allow all nodes to access the certificates services
-path /certificate_revocation_list/ca
-method find
-allow *
-
-# allow all nodes to store their own reports
-path ~ ^/report/([^/]+)$
-method save
-allow $1
-
-# Allow all nodes to access all file services; this is necessary for
-# pluginsync, file serving from modules, and file serving from custom
-# mount points (see fileserver.conf). Note that the `/file` prefix matches
-# requests to both the file_metadata and file_content paths. See "Examples"
-# above if you need more granular access control for custom mount points.
-path /file
-allow *
-
-### Unauthenticated ACLs, for clients without valid certificates; authenticated
-### clients can also access these paths, though they rarely need to.
-
-# allow access to the CA certificate; unauthenticated nodes need this
-# in order to validate the puppet master's certificate
-path /certificate/ca
-auth any
-method find
-allow *
-
-# allow nodes to retrieve the certificate they requested earlier
-path /certificate/
-auth any
-method find
-allow *
-
-# allow nodes to request a new certificate
-path /certificate_request
-auth any
-method find, save
-allow *
-
-# deny everything else; this ACL is not strictly necessary, but
-# illustrates the default policy.
-path /
-auth any
diff --git a/roles/atomic_proxy/files/setup-proxy-containers.sh b/roles/atomic_proxy/files/setup-proxy-containers.sh
deleted file mode 100755
index d047c96c1..000000000
--- a/roles/atomic_proxy/files/setup-proxy-containers.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-function fail {
- msg=$1
- echo
- echo $msg
- echo
- exit 5
-}
-
-
-NUM_DATA_CTR=$(docker ps -a | grep -c proxy-shared-data-1)
-[ "$NUM_DATA_CTR" -ne 0 ] && fail "ERROR: proxy-shared-data-1 exists"
-
-
-# pre-cache the container images
-echo
-timeout --signal TERM --kill-after 30 600 docker pull busybox:latest || fail "ERROR: docker pull of busybox failed"
-
-echo
-# WORKAROUND: Setup the shared data container
-/usr/bin/docker run --name "proxy-shared-data-1" \
- -v /shared/etc/haproxy \
- -v /shared/etc/httpd \
- -v /shared/etc/openshift \
- -v /shared/etc/pki \
- -v /shared/var/run/ctr-ipc \
- -v /shared/var/lib/haproxy \
- -v /shared/usr/local \
- "busybox:latest" true
-
-# WORKAROUND: These are because we're not using a pod yet
-cp /usr/local/etc/ctr-proxy-1.service /usr/local/etc/ctr-proxy-puppet-1.service /usr/local/etc/ctr-proxy-monitoring-1.service /etc/systemd/system/
-
-systemctl daemon-reload
-
-echo
-echo -n "sleeping 10 seconds for systemd reload to take affect..."
-sleep 10
-echo " Done."
-
-# Start the services
-systemctl start ctr-proxy-puppet-1 ctr-proxy-1 ctr-proxy-monitoring-1
diff --git a/roles/atomic_proxy/handlers/main.yml b/roles/atomic_proxy/handlers/main.yml
deleted file mode 100644
index 8eedec17a..000000000
--- a/roles/atomic_proxy/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: reload systemd
- command: systemctl daemon-reload
diff --git a/roles/atomic_proxy/meta/main.yml b/roles/atomic_proxy/meta/main.yml
deleted file mode 100644
index a92d685b1..000000000
--- a/roles/atomic_proxy/meta/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-galaxy_info:
- author: Thomas Wiest
- description: Common base RHEL atomic configurations
- company: Red Hat
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: Apache
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
- # This is the role's PRIVATE counterpart, which is used.
- - ../../../../../atomic_private/ansible/roles/atomic_proxy
diff --git a/roles/atomic_proxy/tasks/main.yml b/roles/atomic_proxy/tasks/main.yml
deleted file mode 100644
index 073a1c61e..000000000
--- a/roles/atomic_proxy/tasks/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- include: setup_puppet.yml
-- include: setup_containers.yml
diff --git a/roles/atomic_proxy/tasks/setup_containers.yml b/roles/atomic_proxy/tasks/setup_containers.yml
deleted file mode 100644
index ee971623a..000000000
--- a/roles/atomic_proxy/tasks/setup_containers.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: "get output of: docker images"
- command: docker images
- changed_when: False # don't report as changed
- register: docker_images
-
-- name: docker pull busybox ONLY if it's not present
- command: "docker pull busybox:latest"
- when: "not docker_images.stdout | search('busybox.*latest')"
-
-- name: docker pull containers ONLY if they're not present (needed otherwise systemd will timeout pulling the containers)
- command: "docker pull docker-registry.ops.rhcloud.com/{{ item }}:{{ oo_env }}"
- with_items:
- - oso-v2-proxy
- - oso-v2-puppet
- - oso-v2-monitoring
- when: "not docker_images.stdout | search('docker-registry.ops.rhcloud.com/{{ item }}.*{{ oo_env }}')"
-
-- name: "get output of: docker ps -a"
- command: docker ps -a
- changed_when: False # don't report as changed
- register: docker_ps
-
-- name: run proxy-shared-data-1
- command: /usr/bin/docker run --name "proxy-shared-data-1" \
- -v /shared/etc/haproxy \
- -v /shared/etc/httpd \
- -v /shared/etc/openshift \
- -v /shared/etc/pki \
- -v /shared/var/run/ctr-ipc \
- -v /shared/var/lib/haproxy \
- -v /shared/usr/local \
- "busybox:latest" true
- when: "not docker_ps.stdout | search('proxy-shared-data-1')"
-
-- name: Deploy systemd files for containers
- template:
- src: "systemd/{{ item }}.j2"
- dest: "/etc/systemd/system/{{ item }}"
- mode: 0640
- owner: root
- group: root
- with_items:
- - ctr-proxy-1.service
- - ctr-proxy-monitoring-1.service
- - ctr-proxy-puppet-1.service
- notify: reload systemd
-
-- name: start containers
- service:
- name: "{{ item }}"
- state: started
- enabled: yes
- with_items:
- - ctr-proxy-puppet-1
- - ctr-proxy-1
- - ctr-proxy-monitoring-1
diff --git a/roles/atomic_proxy/tasks/setup_puppet.yml b/roles/atomic_proxy/tasks/setup_puppet.yml
deleted file mode 100644
index 7a599f06d..000000000
--- a/roles/atomic_proxy/tasks/setup_puppet.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: make puppet conf dir
- file:
- dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet"
- mode: 755
- owner: root
- group: root
- state: directory
-
-- name: upload puppet auth config
- copy:
- src: puppet/auth.conf
- dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet/auth.conf"
- mode: 0644
- owner: root
- group: root
-
-- name: upload puppet config
- template:
- src: puppet/puppet.conf.j2
- dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet/puppet.conf"
- mode: 0644
- owner: root
- group: root
diff --git a/roles/atomic_proxy/templates/puppet/puppet.conf.j2 b/roles/atomic_proxy/templates/puppet/puppet.conf.j2
deleted file mode 100644
index 9731ff168..000000000
--- a/roles/atomic_proxy/templates/puppet/puppet.conf.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-[main]
- # we need to override the host name of the container
- certname = ctr-proxy.{{ oo_env }}.rhcloud.com
-
- # The Puppet log directory.
- # The default value is '$vardir/log'.
- logdir = /var/log/puppet
-
- # Where Puppet PID files are kept.
- # The default value is '$vardir/run'.
- rundir = /var/run/puppet
-
- # Where SSL certificates are kept.
- # The default value is '$confdir/ssl'.
- ssldir = $vardir/ssl
- manifest = $manifestdir/site.pp
- manifestdir = /var/lib/puppet/environments/pub/$environment/manifests
- environment = {{ oo_env_long }}
- modulepath = /var/lib/puppet/environments/pub/$environment/modules:/var/lib/puppet/environments/pri/$environment/modules:/var/lib/puppet/environments/pri/production/modules:$confdir/modules:/usr/share/puppet/modules
-
-[agent]
- # The file in which puppetd stores a list of the classes
- # associated with the retrieved configuratiion. Can be loaded in
- # the separate ``puppet`` executable using the ``--loadclasses``
- # option.
- # The default value is '$confdir/classes.txt'.
- classfile = $vardir/classes.txt
-
- # Where puppetd caches the local configuration. An
- # extension indicating the cache format is added automatically.
- # The default value is '$confdir/localconfig'.
- localconfig = $vardir/localconfig
- server = puppet.ops.rhcloud.com
- environment = {{ oo_env_long }}
- pluginsync = true
- graph = true
- configtimeout = 600
- report = true
- runinterval = 3600
- splay = true
diff --git a/roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2 b/roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2
deleted file mode 100755
index d9aa2d811..000000000
--- a/roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-VOL_DIR=/var/lib/docker/volumes/proxy
-SSH_CMD="ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null"
-
-mkdir -p ${VOL_DIR}/etc/haproxy/
-rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/haproxy/ ${VOL_DIR}/etc/haproxy/
-
-mkdir -p ${VOL_DIR}/etc/httpd/
-rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/httpd/ ${VOL_DIR}/etc/httpd/
-
-mkdir -p ${VOL_DIR}/etc/pki/tls/
-rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/pki/tls/ ${VOL_DIR}/etc/pki/tls/
-
-# We need to disable the haproxy chroot
-sed -i -re 's/^(\s+)chroot/\1#chroot/' /var/lib/docker/volumes/proxy/etc/haproxy/haproxy.cfg
diff --git a/roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j2 b/roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j2
deleted file mode 100644
index 988a9f544..000000000
--- a/roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-[Unit]
-Description=Container proxy-1
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Slice=container-small.slice
-
-ExecStartPre=-/usr/bin/docker rm "proxy-1"
-
-ExecStart=/usr/bin/docker run --rm --name "proxy-1" \
- --volumes-from proxy-shared-data-1 \
- -a stdout -a stderr -p 80:80 -p 443:443 -p 4999:4999 \
- "docker-registry.ops.rhcloud.com/oso-v2-proxy:{{ oo_env }}"
-
-ExecReload=-/usr/bin/docker stop "proxy-1"
-ExecReload=-/usr/bin/docker rm "proxy-1"
-ExecStop=-/usr/bin/docker stop "proxy-1"
-
-[Install]
-WantedBy=container.target
-
-# Container information
-X-ContainerId=proxy-1
-X-ContainerImage=docker-registry.ops.rhcloud.com/oso-v2-proxy:{{ oo_env }}
-X-ContainerUserId=
-X-ContainerRequestId=LwiWtYWaAvSavH6Ze53QJg
-X-ContainerType=simple
-X-PortMapping=80:80
-X-PortMapping=443:443
-X-PortMapping=4999:4999
diff --git a/roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j2 b/roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j2
deleted file mode 100644
index 975b0061b..000000000
--- a/roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-[Unit]
-Description=Container proxy-monitoring-1
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Slice=container-small.slice
-
-ExecStartPre=-/usr/bin/docker rm "proxy-monitoring-1"
-
-ExecStart=/usr/bin/docker run --rm --name "proxy-monitoring-1" \
- --volumes-from proxy-shared-data-1 \
- -a stdout -a stderr \
- -e "OO_ENV={{ oo_env }}" \
- -e "OO_CTR_TYPE=proxy" \
- -e "OO_ZABBIX_HOSTGROUPS={{ oo_zabbix_hostgroups | join(',') }}" \
- -e "OO_ZABBIX_TEMPLATES=Template OpenShift Proxy Ctr" \
- "docker-registry.ops.rhcloud.com/oso-v2-monitoring:{{ oo_env }}"
-
-ExecReload=-/usr/bin/docker stop "proxy-monitoring-1"
-ExecReload=-/usr/bin/docker rm "proxy-monitoring-1"
-ExecStop=-/usr/bin/docker stop "proxy-monitoring-1"
-
-[Install]
-WantedBy=container.target
-
-# Container information
-X-ContainerId=proxy-monitoring-1
-X-ContainerImage=docker-registry.ops.rhcloud.com/oso-v2-monitoring:{{ oo_env }}
-X-ContainerUserId=
-X-ContainerRequestId=LwiWtYWaAvSavH6Ze53QJg
-X-ContainerType=simple
-X-PortMapping=80:80
-X-PortMapping=443:443
-X-PortMapping=4999:4999
diff --git a/roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j2 b/roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j2
deleted file mode 100644
index c3f28f471..000000000
--- a/roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-[Unit]
-Description=Container proxy-puppet-1
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Slice=container-small.slice
-
-
-ExecStartPre=-/usr/bin/docker rm "proxy-puppet-1"
-
-ExecStart=/usr/bin/docker run --rm --name "proxy-puppet-1" \
- --volumes-from proxy-shared-data-1 \
- -v /var/lib/docker/volumes/proxy_puppet/var/lib/puppet/ssl:/var/lib/puppet/ssl \
- -v /var/lib/docker/volumes/proxy_puppet/etc/puppet:/etc/puppet \
- -a stdout -a stderr \
- "docker-registry.ops.rhcloud.com/oso-v2-puppet:{{ oo_env }}"
-
-# Set links (requires container have a name)
-ExecReload=-/usr/bin/docker stop "proxy-puppet-1"
-ExecReload=-/usr/bin/docker rm "proxy-puppet-1"
-ExecStop=-/usr/bin/docker stop "proxy-puppet-1"
-
-[Install]
-WantedBy=container.target
-
-# Container information
-X-ContainerId=proxy-puppet-1
-X-ContainerImage=docker-registry.ops.rhcloud.com/oso-v2-puppet:{{ oo_env }}
-X-ContainerUserId=
-X-ContainerRequestId=Ky0lhw0onwoSDJR4GK6t3g
-X-ContainerType=simple
diff --git a/roles/atomic_proxy/vars/main.yml b/roles/atomic_proxy/vars/main.yml
deleted file mode 100644
index 1f90492fd..000000000
--- a/roles/atomic_proxy/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-oo_proxy_puppet_volume_dir: /var/lib/docker/volumes/proxy_puppet
diff --git a/roles/docker/files/enter-container.sh b/roles/docker/files/enter-container.sh
deleted file mode 100755
index 7cf5b8d83..000000000
--- a/roles/docker/files/enter-container.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-if [ $# -ne 1 ]
-then
- echo
- echo "Usage: $(basename $0) <container_name>"
- echo
- exit 1
-fi
-
-PID=$(docker inspect --format '{{.State.Pid}}' $1)
-
-nsenter --target $PID --mount --uts --ipc --net --pid
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
new file mode 100644
index 000000000..eca7419c1
--- /dev/null
+++ b/roles/docker/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+
+- name: restart docker
+ service: name=docker state=restarted
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index ca700db17..96949230d 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -1,15 +1,8 @@
---
# tasks file for docker
- name: Install docker
- yum: pkg=docker-io
+ yum: pkg=docker
- name: enable and start the docker service
service: name=docker enabled=yes state=started
-- copy: src=enter-container.sh dest=/usr/local/bin/enter-container.sh mode=0755
-
-# From the origin rpm there exists instructions on how to
-# setup origin properly. The following steps come from there
-- name: Change root to be in the Docker group
- user: name=root groups=dockerroot append=yes
-
diff --git a/roles/docker_storage/README.md b/roles/docker_storage/README.md
new file mode 100644
index 000000000..0d8f31afc
--- /dev/null
+++ b/roles/docker_storage/README.md
@@ -0,0 +1,39 @@
+docker_storage
+=========
+
+Configure docker_storage options
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role/docker_storage:
+ - key: df.fs
+ value: xfs
+ }
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+Openshift operations, Red Hat, Inc
diff --git a/roles/docker_storage/defaults/main.yml b/roles/docker_storage/defaults/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/docker_storage/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/docker_storage/handlers/main.yml b/roles/docker_storage/handlers/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/docker_storage/handlers/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/docker_storage/meta/main.yml b/roles/docker_storage/meta/main.yml
new file mode 100644
index 000000000..a5d51cd3a
--- /dev/null
+++ b/roles/docker_storage/meta/main.yml
@@ -0,0 +1,9 @@
+---
+galaxy_info:
+ author: Openshift
+ description: Setup docker_storage options
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies:
+- docker
diff --git a/roles/docker_storage/tasks/main.yml b/roles/docker_storage/tasks/main.yml
new file mode 100644
index 000000000..48a3fc208
--- /dev/null
+++ b/roles/docker_storage/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+- lvg:
+ pvs: "{{ dst_device }}"
+ vg: "{{ dst_vg }}"
+ register: dst_lvg
+
+- lvol:
+ lv: data
+ vg: "{{ dst_vg }}"
+ size: 95%VG
+ register: dst_lvol_data
+
+- lvol:
+ lv: metadata
+ vg: "{{ dst_vg }}"
+ size: 5%VG
+ register: dst_lvol_metadata
+
+
+- name: Update docker_storage options
+ lineinfile:
+ dest: /etc/sysconfig/docker-storage
+ backrefs: yes
+ regexp: "^(DOCKER_STORAGE_OPTIONS=)"
+ line: '\1 --storage-opt {{ dst_options | oo_combine_key_value("=") | join(" --storage-opt ") }}'
+ when: dst_options is defined and dst_options | length > 0
+ register: dst_config
+
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ notify:
+ - restart docker
+ when: dst_config | changed or
+ dst_lvg | changed or
+ dst_lvol_data | changed or
+ dst_lvol_metadata | changed
diff --git a/roles/docker_storage/vars/main.yml b/roles/docker_storage/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/docker_storage/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/etcd/README.md b/roles/etcd/README.md
deleted file mode 100644
index 225dd44b9..000000000
--- a/roles/etcd/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
deleted file mode 100644
index b897913f9..000000000
--- a/roles/etcd/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart etcd
- service: name=etcd state=restarted
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
deleted file mode 100644
index c5c362c60..000000000
--- a/roles/etcd/meta/main.yml
+++ /dev/null
@@ -1,124 +0,0 @@
----
-galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
- min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
-dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
deleted file mode 100644
index 062d2e8a9..000000000
--- a/roles/etcd/tasks/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Install etcd
- yum: pkg=etcd state=installed disable_gpg_check=yes
-
-- name: Install etcdctl
- yum: pkg=etcdctl state=installed disable_gpg_check=yes
-
-- name: Write etcd global config file
- template: src=etcd.conf.j2 dest=/etc/etcd/etcd.conf
- notify:
- - restart etcd
-
-- name: Open firewalld port for etcd
- firewalld: port=4001/tcp permanent=false state=enabled
-
-- name: Save firewalld port for etcd
- firewalld: port=4001/tcp permanent=true state=enabled
-
-- name: Enable etcd
- service: name=etcd enabled=yes state=started
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
deleted file mode 100644
index 1b43f6552..000000000
--- a/roles/etcd/templates/etcd.conf.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-# This configuration file is written in [TOML](https://github.com/mojombo/toml)
-
-# addr = "127.0.0.1:4001"
-# bind_addr = "127.0.0.1:4001"
-# ca_file = ""
-# cert_file = ""
-# cors = []
-# cpu_profile_file = ""
-# data_dir = "."
-# discovery = "http://etcd.local:4001/v2/keys/_etcd/registry/examplecluster"
-# http_read_timeout = 10
-# http_write_timeout = 10
-# key_file = ""
-# peers = []
-# peers_file = ""
-# max_cluster_size = 9
-# max_result_buffer = 1024
-# max_retry_attempts = 3
-# name = "default-name"
-# snapshot = false
-# verbose = false
-# very_verbose = false
-
-# [peer]
-# addr = "127.0.0.1:7001"
-# bind_addr = "127.0.0.1:7001"
-# ca_file = ""
-# cert_file = ""
-# key_file = ""
-
-# [cluster]
-# active_size = 9
-# remove_delay = 1800.0
-# sync_interval = 5.0
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
new file mode 100644
index 000000000..d828db52a
--- /dev/null
+++ b/roles/fluentd_master/tasks/main.yml
@@ -0,0 +1,47 @@
+---
+# TODO: Update fluentd install and configuration when packaging is complete
+- name: download and install td-agent
+ yum:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+
+- name: Verify fluentd plugin installed
+ command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
+ register: _fluent_plugin_check
+ failed_when: false
+ changed_when: false
+
+- name: install Kubernetes fluentd plugin
+ command: '/opt/td-agent/embedded/bin/gem install fluent-plugin-kubernetes'
+ when: _fluent_plugin_check.rc == 1
+
+- name: Creates directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0755
+ with_items: ['/etc/td-agent/config.d']
+
+- name: Add include to td-agent configuration
+ lineinfile:
+ dest: '/etc/td-agent/td-agent.conf'
+ regexp: '^@include config.d'
+ line: '@include config.d/*.conf'
+ state: present
+
+- name: install Kubernetes fluentd configuration file
+ template:
+ src: kubernetes.conf.j2
+ dest: /etc/td-agent/config.d/kubernetes.conf
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0444
+
+- name: ensure td-agent is running
+ service:
+ name: 'td-agent'
+ state: started
+ enabled: yes
+
diff --git a/roles/fluentd_master/templates/kubernetes.conf.j2 b/roles/fluentd_master/templates/kubernetes.conf.j2
new file mode 100644
index 000000000..7b5c86062
--- /dev/null
+++ b/roles/fluentd_master/templates/kubernetes.conf.j2
@@ -0,0 +1,9 @@
+<match kubernetes.**>
+ type file
+ path /var/log/td-agent/containers.log
+ time_slice_format %Y%m%d
+ time_slice_wait 10m
+ time_format %Y%m%dT%H%M%S%z
+ compress gzip
+ utc
+</match>
diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml
new file mode 100644
index 000000000..f9ef30b83
--- /dev/null
+++ b/roles/fluentd_node/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+# TODO: Update fluentd install and configuration when packaging is complete
+- name: download and install td-agent
+ yum:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+
+- name: Verify fluentd plugin installed
+ command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
+ register: _fluent_plugin_check
+ failed_when: false
+ changed_when: false
+
+- name: install Kubernetes fluentd plugin
+ command: '/opt/td-agent/embedded/bin/gem install fluent-plugin-kubernetes'
+ when: _fluent_plugin_check.rc == 1
+
+- name: Override td-agent configuration file
+ template:
+ src: td-agent.j2
+ dest: /etc/sysconfig/td-agent
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0444
+
+- name: Creates directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0755
+ with_items: ['/etc/td-agent/config.d', '/var/log/td-agent/tmp']
+
+- name: Add include to td-agent configuration
+ lineinfile:
+ dest: '/etc/td-agent/td-agent.conf'
+ regexp: '^@include config.d'
+ line: '@include config.d/*.conf'
+ state: present
+
+- name: install Kubernetes fluentd configuration file
+ template:
+ src: kubernetes.conf.j2
+ dest: /etc/td-agent/config.d/kubernetes.conf
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0444
+
+- name: ensure td-agent is running
+ service:
+ name: 'td-agent'
+ state: started
+ enabled: yes
+
diff --git a/roles/fluentd_node/templates/kubernetes.conf.j2 b/roles/fluentd_node/templates/kubernetes.conf.j2
new file mode 100644
index 000000000..5f1eecb20
--- /dev/null
+++ b/roles/fluentd_node/templates/kubernetes.conf.j2
@@ -0,0 +1,53 @@
+<source>
+ type tail
+ path /var/lib/docker/containers/*/*-json.log
+ pos_file /var/log/td-agent/tmp/fluentd-docker.pos
+ time_format %Y-%m-%dT%H:%M:%S
+ tag docker.*
+ format json
+ read_from_head true
+</source>
+
+<match docker.var.lib.docker.containers.*.*.log>
+ type kubernetes
+ container_id ${tag_parts[5]}
+ tag docker.${name}
+</match>
+
+<match kubernetes>
+ type copy
+
+ <store>
+ type forward
+ send_timeout 60s
+ recover_wait 10s
+ heartbeat_interval 1s
+ phi_threshold 16
+ hard_timeout 60s
+ log_level trace
+ require_ack_response true
+ heartbeat_type tcp
+
+ <server>
+ name {{groups['oo_first_master'][0]}}
+ host {{hostvars[groups['oo_first_master'][0]].openshift.common.hostname}}
+ port 24224
+ weight 60
+ </server>
+
+ <secondary>
+ type file
+ path /var/log/td-agent/forward-failed
+ </secondary>
+ </store>
+
+ <store>
+ type file
+ path /var/log/td-agent/containers.log
+ time_slice_format %Y%m%d
+ time_slice_wait 10m
+ time_format %Y%m%dT%H%M%S%z
+ compress gzip
+ utc
+ </store>
+</match>
diff --git a/roles/fluentd_node/templates/td-agent.j2 b/roles/fluentd_node/templates/td-agent.j2
new file mode 100644
index 000000000..7245e11ec
--- /dev/null
+++ b/roles/fluentd_node/templates/td-agent.j2
@@ -0,0 +1,2 @@
+DAEMON_ARGS=
+TD_AGENT_ARGS="/usr/sbin/td-agent --log /var/log/td-agent/td-agent.log --use-v1-config"
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
new file mode 100644
index 000000000..56c69c286
--- /dev/null
+++ b/roles/kube_nfs_volumes/README.md
@@ -0,0 +1,111 @@
+# kube_nfs_volumes
+
+This role is useful to export disks as set of Kubernetes persistent volumes.
+It does so by partitioning the disks, creating ext4 filesystem on each
+partition, mounting the partitions, exporting the mounts via NFS and adding
+these NFS shares as NFS persistent volumes to existing Kubernetes installation.
+
+All partitions on given disks are used as the persistent volumes, including
+already existing partitions! There should be no other data (such as operating
+system) on the disks!
+
+## Requirements
+
+* Running Kubernetes with NFS persistent volume support (on a remote machine).
+
+* Works only on RHEL/Fedora-like distros.
+
+## Role Variables
+
+```
+# Options of NFS exports.
+nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
+
+# Directory, where the created partitions should be mounted. They will be
+# mounted as <mount_dir>/sda1 etc.
+mount_dir: /exports
+
+# Comma-separated list of disks to partition.
+# This role always assumes that all partitions on these disks are used as
+# physical volumes.
+disks: /dev/sdb,/dev/sdc
+
+# Whether to re-partition already partitioned disks.
+# Even though the disks won't get repartitioned on 'false', all existing
+# partitions on the disk are exported via NFS as persistent volumes!
+force: false
+
+# Specification of size of partitions to create. See library/partitionpool.py
+# for details.
+sizes: 100M
+
+# URL of Kubernetes API server, incl. port.
+kubernetes_url: https://10.245.1.2:6443
+
+# Token to use for authentication to the API server
+kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+```
+
+## Dependencies
+
+None
+
+## Example Playbook
+
+With this playbook, `/dev/sdb` is partitioned into 100MiB partitions, all of
+them are mounted into `/exports/sdb<N>` directory and all these directories
+are exported via NFS and added as persistent volumes to Kubernetes running at
+`https://10.245.1.2:6443`.
+
+ - hosts: servers
+ roles:
+ - role: kube_nfs_volumes
+ disks: "/dev/sdb"
+ sizes: 100M
+ kubernetes_url: https://10.245.1.2:6443
+ kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+
+See library/partitionpool.py for details how `sizes` parameter can be used
+to create partitions of various sizes.
+
+## Full example
+Let's say there are two machines, 10.0.0.1 and 10.0.0.2, that we want to use as
+NFS servers for our Kubernetes cluster, running Kubernetes public API at
+https://10.245.1.2:6443.
+
+Both servers have three 1 TB disks, /dev/sda for the system and /dev/sdb and
+/dev/sdc to be partitioned. We want to split the data disks into 5, 10 and
+20 GiB partitions so that 10% of the total capacity is in 5 GiB partitions, 40%
+in 10 GiB and 50% in 20 GiB partitions.
+
+That means, each data disk will have 20x 5 GiB, 40x 10 GiB and 25x 20 GiB
+partitions.
+
+* Create an `inventory` file:
+ ```
+ [nfsservers]
+ 10.0.0.1
+ 10.0.0.2
+ ```
+
+* Create an ansible playbook, say `setupnfs.yaml`:
+ ```
+ - hosts: nfsservers
+ sudo: yes
+ roles:
+ - role: kube_nfs_volumes
+ disks: "/dev/sdb,/dev/sdc"
+ sizes: 5G:10,10G:40,20G:50
+ force: no
+ kubernetes_url: https://10.245.1.2:6443
+ kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+ ```
+
+* Run the playbook:
+ ```
+  ansible-playbook -i inventory setupnfs.yaml
+ ```
+
+## License
+
+Apache 2.0
diff --git a/roles/kube_nfs_volumes/defaults/main.yml b/roles/kube_nfs_volumes/defaults/main.yml
new file mode 100644
index 000000000..e296492f9
--- /dev/null
+++ b/roles/kube_nfs_volumes/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# Options of NFS exports.
+nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
+
+# Directory, where the created partitions should be mounted. They will be
+# mounted as <mount_dir>/sda1 etc.
+mount_dir: /exports
+
+# Force re-partitioning the disks
+force: false
diff --git a/roles/kube_nfs_volumes/handlers/main.yml b/roles/kube_nfs_volumes/handlers/main.yml
new file mode 100644
index 000000000..52f3ceffe
--- /dev/null
+++ b/roles/kube_nfs_volumes/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart nfs
+ service: name=nfs-server state=restarted
diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py
new file mode 100644
index 000000000..1ac8eed4d
--- /dev/null
+++ b/roles/kube_nfs_volumes/library/partitionpool.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+"""
+Ansible module for partitioning.
+"""
+
+# There is no pyparted on our Jenkins worker
+# pylint: disable=import-error
+import parted
+
+DOCUMENTATION = """
+---
+module: partitionpool
+short_description: Partition a disk into partitions.
+description:
+ - Creates partitions on given disk based on partition sizes and their weights.
+ Unless 'force' option is set to True, it ignores already partitioned disks.
+
+ When the disk is empty or 'force' is set to True, it always creates a new
+ GPT partition table on the disk. Then it creates number of partitions, based
+ on their weights.
+
+ This module should be used when a system admin wants to split existing disk(s)
+ into pools of partitions of specific sizes. It is not intended as generic disk
+ partitioning module!
+
+ Independent on 'force' parameter value and actual disk state, the task
+ always fills 'partition_pool' fact with all partitions on given disks,
+ together with their sizes (in bytes). E.g.:
+ partition_sizes = [
+ { name: sda1, Size: 1048576000 },
+ { name: sda2, Size: 1048576000 },
+ { name: sdb1, Size: 1048576000 },
+ ...
+ ]
+
+options:
+ disk:
+ description:
+ - Disk to partition.
+ size:
+ description:
+ - Sizes of partitions to create and their weights. Has form of:
+      <size1>[:<weight1>][,<size2>[:<weight2>][,...]]
+ - Any <size> can end with 'm' or 'M' for megabyte, 'g/G' for gigabyte
+ and 't/T' for terabyte. Megabyte is used when no unit is specified.
+ - If <weight> is missing, 1.0 is used.
+ - From each specified partition <sizeX>, number of these partitions are
+ created so they occupy spaces represented by <weightX>, proportionally to
+ other weights.
+
+ - Example 1: size=100G says, that the whole disk is split in number of 100 GiB
+ partitions. On 1 TiB disk, 10 partitions will be created.
+
+ - Example 2: size=100G:1,10G:1 says that ratio of space occupied by 100 GiB
+ partitions and 10 GiB partitions is 1:1. Therefore, on 1 TiB disk, 500 GiB
+ will be split into five 100 GiB partition and 500 GiB will be split into fifty
+ 10GiB partitions.
+ - size=100G:1,10G:1 = 5x 100 GiB and 50x 10 GiB partitions (on 1 TiB disk).
+
+ - Example 3: size=200G:1,100G:2 says that the ratio of space occupied by 200 GiB
+ partitions and 100GiB partition is 1:2. Therefore, on 1 TiB disk, 1/3
+ (300 GiB) should be occupied by 200 GiB partitions. Only one fits there,
+    so only one is created (we always round nr. of partitions *down*). The rest
+ (800 GiB) is split into eight 100 GiB partitions, even though it's more
+ than 2/3 of total space - free space is always allocated as much as possible.
+ - size=200G:1,100G:2 = 1x 200 GiB and 8x 100 GiB partitions (on 1 TiB disk).
+
+ - Example: size=200G:1,100G:1,50G:1 says that the ratio of space occupied by
+ 200 GiB, 100 GiB and 50 GiB partitions is 1:1:1. Therefore 1/3 of 1 TiB disk
+ is dedicated to 200 GiB partitions. Only one fits there and only one is
+ created. The rest (800 GiB) is distributed according to remaining weights:
+ 100 GiB vs 50 GiB is 1:1, we create four 100 GiB partitions (400 GiB in total)
+ and eight 50 GiB partitions (again, 400 GiB).
+ - size=200G:1,100G:1,50G:1 = 1x 200 GiB, 4x 100 GiB and 8x 50 GiB partitions
+ (on 1 TiB disk).
+
+ force:
+ description:
+      - If True, it will always overwrite the partition table on the disk and create a new one.
+ - If False (default), it won't change existing partition tables.
+
+"""
+
+# It's not class, it's more a simple struct with almost no functionality.
+# pylint: disable=too-few-public-methods
+class PartitionSpec(object):
+ """ Simple class to represent required partitions."""
+ def __init__(self, size, weight):
+ """ Initialize the partition specifications."""
+ # Size of the partitions
+ self.size = size
+ # Relative weight of this request
+ self.weight = weight
+ # Number of partitions to create, will be calculated later
+ self.count = -1
+
+ def set_count(self, count):
+ """ Set count of parititions of this specification. """
+ self.count = count
+
+def assign_space(total_size, specs):
+ """
+ Satisfy all the PartitionSpecs according to their weight.
+ In other words, calculate spec.count of all the specs.
+ """
+ total_weight = 0.0
+ for spec in specs:
+ total_weight += float(spec.weight)
+
+ for spec in specs:
+ num_blocks = int((float(spec.weight) / total_weight) * (total_size / float(spec.size)))
+ spec.set_count(num_blocks)
+ total_size -= num_blocks * spec.size
+ total_weight -= spec.weight
+
+def partition(diskname, specs, force=False, check_mode=False):
+ """
+ Create requested partitions.
+ Returns nr. of created partitions or 0 when the disk was already partitioned.
+ """
+ count = 0
+
+ dev = parted.getDevice(diskname)
+ try:
+ disk = parted.newDisk(dev)
+ except parted.DiskException:
+ # unrecognizable format, treat as empty disk
+ disk = None
+
+ if disk and len(disk.partitions) > 0 and not force:
+ print "skipping", diskname
+ return 0
+
+ # create new partition table, wiping all existing data
+ disk = parted.freshDisk(dev, 'gpt')
+ # calculate nr. of partitions of each size
+ assign_space(dev.getSize(), specs)
+ last_megabyte = 1
+ for spec in specs:
+ for _ in range(spec.count):
+ # create the partition
+ start = parted.sizeToSectors(last_megabyte, "MiB", dev.sectorSize)
+ length = parted.sizeToSectors(spec.size, "MiB", dev.sectorSize)
+ geo = parted.Geometry(device=dev, start=start, length=length)
+ filesystem = parted.FileSystem(type='ext4', geometry=geo)
+ part = parted.Partition(
+ disk=disk,
+ type=parted.PARTITION_NORMAL,
+ fs=filesystem,
+ geometry=geo)
+ disk.addPartition(partition=part, constraint=dev.optimalAlignedConstraint)
+ last_megabyte += spec.size
+ count += 1
+ try:
+ if not check_mode:
+ disk.commit()
+ except parted.IOException:
+ # partitions have been written, but we have been unable to inform the
+ # kernel of the change, probably because they are in use.
+ # Ignore it and hope for the best...
+ pass
+ return count
+
+def parse_spec(text):
+ """ Parse string with partition specification. """
+ tokens = text.split(",")
+ specs = []
+ for token in tokens:
+ if not ":" in token:
+ token += ":1"
+
+ (sizespec, weight) = token.split(':')
+ weight = float(weight) # throws exception with reasonable error string
+
+ units = {"m": 1, "g": 1 << 10, "t": 1 << 20, "p": 1 << 30}
+ unit = units.get(sizespec[-1].lower(), None)
+ if not unit:
+ # there is no unit specifier, it must be just the number
+ size = float(sizespec)
+ unit = 1
+ else:
+ size = float(sizespec[:-1])
+ spec = PartitionSpec(int(size * unit), weight)
+ specs.append(spec)
+ return specs
+
+def get_partitions(diskpath):
+ """ Return array of partition names for given disk """
+ dev = parted.getDevice(diskpath)
+ disk = parted.newDisk(dev)
+ partitions = []
+ for part in disk.partitions:
+ (_, _, pname) = part.path.rsplit("/")
+ partitions.append({"name": pname, "size": part.getLength() * dev.sectorSize})
+
+ return partitions
+
+
+def main():
+ """ Ansible module main method. """
+ module = AnsibleModule(
+ argument_spec=dict(
+ disks=dict(required=True, type='str'),
+ force=dict(required=False, default="no", type='bool'),
+ sizes=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ disks = module.params['disks']
+ force = module.params['force']
+ if force is None:
+ force = False
+ sizes = module.params['sizes']
+
+ try:
+ specs = parse_spec(sizes)
+ except ValueError, ex:
+ err = "Error parsing sizes=" + sizes + ": " + str(ex)
+ module.fail_json(msg=err)
+
+ partitions = []
+ changed_count = 0
+ for disk in disks.split(","):
+ try:
+ changed_count += partition(disk, specs, force, module.check_mode)
+ except Exception, ex:
+ err = "Error creating partitions on " + disk + ": " + str(ex)
+ raise
+ #module.fail_json(msg=err)
+ partitions += get_partitions(disk)
+
+ module.exit_json(changed=(changed_count > 0), ansible_facts={"partition_pool": partitions})
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+main()
+
diff --git a/roles/kube_nfs_volumes/meta/main.yml b/roles/kube_nfs_volumes/meta/main.yml
new file mode 100644
index 000000000..eb71a7a1f
--- /dev/null
+++ b/roles/kube_nfs_volumes/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Jan Safranek
+ description: Partition disks and use them as Kubernetes NFS physical volumes.
+ company: Red Hat, Inc.
+ license: license (Apache)
+ min_ansible_version: 1.4
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - name: Fedora
+ versions:
+ - all
+ categories:
+ - cloud
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
new file mode 100644
index 000000000..f4a506234
--- /dev/null
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Install pyparted (RedHat/Fedora)
+ yum: name=pyparted,python-httplib2 state=present
+
+- name: partition the drives
+ partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}
+
+- name: create filesystem
+ filesystem: fstype=ext4 dev=/dev/{{ item.name }}
+ with_items: partition_pool
+
+- name: mount
+ mount: name={{mount_dir}}/{{ item.name }} src=/dev/{{ item.name }} state=mounted fstype=ext4 passno=2
+ with_items: partition_pool
+
+- include: nfs.yml
+
+- name: export physical volumes
+ uri: url={{ kubernetes_url }}/api/v1beta3/persistentvolumes
+ method=POST
+ body='{{ lookup("template", "../templates/nfs.json.j2") }}'
+ body_format=json
+ status_code=201
+ HEADER_Authorization="Bearer {{ kubernetes_token }}"
+ with_items: partition_pool
diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml
new file mode 100644
index 000000000..559fcf17c
--- /dev/null
+++ b/roles/kube_nfs_volumes/tasks/nfs.yml
@@ -0,0 +1,16 @@
+---
+- name: Install NFS server on Fedora/Red Hat
+ yum: name=nfs-utils state=present
+
+- name: Start rpcbind on Fedora/Red Hat
+ service: name=rpcbind state=started enabled=yes
+
+- name: Start nfs on Fedora/Red Hat
+ service: name=nfs-server state=started enabled=yes
+
+- name: Export the directories
+ lineinfile: dest=/etc/exports
+ regexp="^{{ mount_dir }}/{{ item.name }} "
+ line="{{ mount_dir }}/{{ item.name }} {{nfs_export_options}}"
+ with_items: partition_pool
+ notify: restart nfs
diff --git a/roles/kube_nfs_volumes/templates/nfs.json.j2 b/roles/kube_nfs_volumes/templates/nfs.json.j2
new file mode 100644
index 000000000..b42886ef1
--- /dev/null
+++ b/roles/kube_nfs_volumes/templates/nfs.json.j2
@@ -0,0 +1,23 @@
+{
+ "kind": "PersistentVolume",
+ "apiVersion": "v1beta3",
+ "metadata": {
+ "name": "pv-{{ inventory_hostname | regex_replace("\.", "-") }}-{{ item.name }}",
+ "labels": {
+ "type": "nfs"
+ }
+ },
+ "spec": {
+ "capacity": {
+ "storage": "{{ item.size }}"
+ },
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "NFS": {
+ "Server": "{{ inventory_hostname }}",
+ "Path": "{{ mount_dir }}/{{ item.name }}",
+ "ReadOnly": false
+ }
+ }
+}
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index c55677c3f..f76dd84ed 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -10,7 +10,9 @@
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
+ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
+
- name: Set hostname
hostname: name={{ openshift.common.hostname }}
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 9f657a2c7..8e7d71154 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -6,6 +6,4 @@
# interfaces)
os_firewall_use_firewalld: False
-openshift_cert_parent_dir: /var/lib/openshift
-openshift_cert_relative_dir: openshift.local.certificates
-openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
+openshift_data_dir: /var/lib/openshift
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 9c2657ff2..e4d3bf26f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1,10 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
-# disable pylint checks
-# temporarily disabled until items can be addressed:
-# fixme - until all TODO comments have been addressed
-# pylint:disable=fixme
"""Ansible module for retrieving and setting openshift related facts"""
DOCUMENTATION = '''
@@ -19,6 +15,7 @@ EXAMPLES = '''
import ConfigParser
import copy
+import os
def hostname_valid(hostname):
@@ -166,7 +163,6 @@ def normalize_gce_facts(metadata, facts):
facts['network']['interfaces'].append(int_info)
_, _, zone = metadata['instance']['zone'].rpartition('/')
facts['zone'] = zone
- facts['external_id'] = metadata['instance']['id']
# Default to no sdn for GCE deployments
facts['use_openshift_sdn'] = False
@@ -215,7 +211,6 @@ def normalize_aws_facts(metadata, facts):
int_info['network_id'] = None
facts['network']['interfaces'].append(int_info)
facts['zone'] = metadata['placement']['availability-zone']
- facts['external_id'] = metadata['instance-id']
# TODO: actually attempt to determine default local and public ips
# by using the ansible default ip fact and the ipv4-associations
@@ -247,7 +242,7 @@ def normalize_openstack_facts(metadata, facts):
# metadata api, should be updated if neutron exposes this.
facts['zone'] = metadata['availability_zone']
- facts['external_id'] = metadata['uuid']
+
facts['network']['ip'] = metadata['ec2_compat']['local-ipv4']
facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
@@ -288,6 +283,72 @@ def normalize_provider_facts(provider, metadata):
facts = normalize_openstack_facts(metadata, facts)
return facts
+def set_registry_url_if_unset(facts):
+ """ Set registry_url fact if not already present in facts dict
+
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated identity providers
+ facts if they were not already present
+ """
+ for role in ('master', 'node'):
+ if role in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'registry_url' not in facts[role]:
+ registry_url = "openshift/origin-${component}:${version}"
+ if deployment_type == 'enterprise':
+ registry_url = "openshift3_beta/ose-${component}:${version}"
+ elif deployment_type == 'online':
+ registry_url = ("docker-registry.ops.rhcloud.com/"
+ "openshift3_beta/ose-${component}:${version}")
+ facts[role]['registry_url'] = registry_url
+
+ return facts
+
+def set_fluentd_facts_if_unset(facts):
+ """ Set fluentd facts if not already present in facts dict
+ dict: the facts dict updated with the generated fluentd facts if
+ missing
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated fluentd
+ facts if they were not already present
+
+ """
+ if 'common' in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'use_fluentd' not in facts['common']:
+ use_fluentd = True if deployment_type == 'online' else False
+ facts['common']['use_fluentd'] = use_fluentd
+ return facts
+
+def set_identity_providers_if_unset(facts):
+ """ Set identity_providers fact if not already present in facts dict
+
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated identity providers
+ facts if they were not already present
+ """
+ if 'master' in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'identity_providers' not in facts['master']:
+ identity_provider = dict(
+ name='allow_all', challenge=True, login=True,
+ kind='AllowAllPasswordIdentityProvider'
+ )
+ if deployment_type == 'enterprise':
+ identity_provider = dict(
+ name='deny_all', challenge=True, login=True,
+ kind='DenyAllPasswordIdentityProvider'
+ )
+
+ facts['master']['identity_providers'] = [identity_provider]
+
+ return facts
def set_url_facts_if_unset(facts):
""" Set url facts if not already present in facts dict
@@ -299,34 +360,77 @@ def set_url_facts_if_unset(facts):
were not already present
"""
if 'master' in facts:
- for (url_var, use_ssl, port, default) in [
- ('api_url',
- facts['master']['api_use_ssl'],
- facts['master']['api_port'],
- facts['common']['hostname']),
- ('public_api_url',
- facts['master']['api_use_ssl'],
- facts['master']['api_port'],
- facts['common']['public_hostname']),
- ('console_url',
- facts['master']['console_use_ssl'],
- facts['master']['console_port'],
- facts['common']['hostname']),
- ('public_console_url' 'console_use_ssl',
- facts['master']['console_use_ssl'],
- facts['master']['console_port'],
- facts['common']['public_hostname'])]:
- if url_var not in facts['master']:
- scheme = 'https' if use_ssl else 'http'
- netloc = default
- if ((scheme == 'https' and port != '443')
- or (scheme == 'http' and port != '80')):
- netloc = "%s:%s" % (netloc, port)
- facts['master'][url_var] = urlparse.urlunparse(
- (scheme, netloc, '', '', '', '')
- )
+ api_use_ssl = facts['master']['api_use_ssl']
+ api_port = facts['master']['api_port']
+ console_use_ssl = facts['master']['console_use_ssl']
+ console_port = facts['master']['console_port']
+ console_path = facts['master']['console_path']
+ etcd_use_ssl = facts['master']['etcd_use_ssl']
+ etcd_port = facts['master']['etcd_port'],
+ hostname = facts['common']['hostname']
+ public_hostname = facts['common']['public_hostname']
+
+ if 'etcd_urls' not in facts['master']:
+ facts['master']['etcd_urls'] = [format_url(etcd_use_ssl, hostname,
+ etcd_port)]
+ if 'api_url' not in facts['master']:
+ facts['master']['api_url'] = format_url(api_use_ssl, hostname,
+ api_port)
+ if 'public_api_url' not in facts['master']:
+ facts['master']['public_api_url'] = format_url(api_use_ssl,
+ public_hostname,
+ api_port)
+ if 'console_url' not in facts['master']:
+ facts['master']['console_url'] = format_url(console_use_ssl,
+ hostname,
+ console_port,
+ console_path)
+ if 'public_console_url' not in facts['master']:
+ facts['master']['public_console_url'] = format_url(console_use_ssl,
+ public_hostname,
+ console_port,
+ console_path)
+ return facts
+
+def set_sdn_facts_if_unset(facts):
+ """ Set sdn facts if not already present in facts dict
+
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated sdn facts if they
+ were not already present
+ """
+ if 'common' in facts:
+ if 'sdn_network_plugin_name' not in facts['common']:
+ use_sdn = facts['common']['use_openshift_sdn']
+ plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
+ facts['common']['sdn_network_plugin_name'] = plugin
+
+ if 'master' in facts:
+ if 'sdn_cluster_network_cidr' not in facts['master']:
+ facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16'
+ if 'sdn_host_subnet_length' not in facts['master']:
+ facts['master']['sdn_host_subnet_length'] = '8'
+
return facts
+def format_url(use_ssl, hostname, port, path=''):
+ """ Format url based on ssl flag, hostname, port and path
+
+ Args:
+ use_ssl (bool): is ssl enabled
+ hostname (str): hostname
+ port (str): port
+ path (str): url path
+ Returns:
+ str: The generated url string
+ """
+ scheme = 'https' if use_ssl else 'http'
+ netloc = hostname
+ if (use_ssl and port != '443') or (not use_ssl and port != '80'):
+ netloc += ":%s" % port
+ return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
def get_current_config(facts):
""" Get current openshift config
@@ -390,7 +494,7 @@ def get_current_config(facts):
return current_config
-def apply_provider_facts(facts, provider_facts, roles):
+def apply_provider_facts(facts, provider_facts):
""" Apply provider facts to supplied facts dict
Args:
@@ -418,11 +522,6 @@ def apply_provider_facts(facts, provider_facts, roles):
facts['common'][ip_var]
)
- if 'node' in roles:
- ext_id = provider_facts.get('external_id')
- if ext_id:
- facts['node']['external_id'] = ext_id
-
facts['provider'] = provider_facts
return facts
@@ -556,10 +655,14 @@ class OpenShiftFacts(object):
defaults = self.get_defaults(roles)
provider_facts = self.init_provider_facts()
- facts = apply_provider_facts(defaults, provider_facts, roles)
+ facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts, local_facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
+ facts = set_fluentd_facts_if_unset(facts)
+ facts = set_identity_providers_if_unset(facts)
+ facts = set_registry_url_if_unset(facts)
+ facts = set_sdn_facts_if_unset(facts)
return dict(openshift=facts)
def get_defaults(self, roles):
@@ -573,31 +676,36 @@ class OpenShiftFacts(object):
"""
defaults = dict()
- common = dict(use_openshift_sdn=True)
ip_addr = self.system_facts['default_ipv4']['address']
- common['ip'] = ip_addr
- common['public_ip'] = ip_addr
-
exit_code, output, _ = module.run_command(['hostname', '-f'])
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['nodename'],
self.system_facts['fqdn']]
hostname = choose_hostname(hostname_values)
- common['hostname'] = hostname
- common['public_hostname'] = hostname
+ common = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr,
+ deployment_type='origin', hostname=hostname,
+ public_hostname=hostname)
+ common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
+ common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
defaults['common'] = common
if 'master' in roles:
master = dict(api_use_ssl=True, api_port='8443',
console_use_ssl=True, console_path='/console',
- console_port='8443', etcd_use_ssl=False,
- etcd_port='4001', portal_net='172.30.17.0/24')
+ console_port='8443', etcd_use_ssl=True,
+ etcd_port='4001', portal_net='172.30.0.0/16',
+ embedded_etcd=True, embedded_kube=True,
+ embedded_dns=True, dns_port='53',
+ bind_addr='0.0.0.0', session_max_seconds=3600,
+ session_name='ssn', session_secrets_file='',
+ access_token_max_seconds=86400,
+ auth_token_max_seconds=500,
+ oauth_grant_method='auto')
defaults['master'] = master
if 'node' in roles:
- node = dict(external_id=common['hostname'], pod_cidr='',
- labels={}, annotations={})
+ node = dict(pod_cidr='', labels={}, annotations={})
node['resources_cpu'] = self.system_facts['processor_cores']
node['resources_memory'] = int(
int(self.system_facts['memtotal_mb']) * 1024 * 1024 * 0.75
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 9f9d0a613..3178e318c 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -17,7 +17,7 @@ From this role:
|-------------------------------------|-----------------------|--------------------------------------------------|
| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when openshift-master starts up |
-| openshift_registry_url | UNDEF | Default docker registry to use |
+| oreg_url | UNDEF | Default docker registry to use |
| openshift_master_api_port | UNDEF | |
| openshift_master_console_port | UNDEF | |
| openshift_master_api_url | UNDEF | |
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 56cf43531..11195e83e 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -11,6 +11,10 @@ os_firewall_allow:
port: 53/tcp
- service: OpenShift dns udp
port: 53/udp
+- service: Fluentd td-agent tcp
+ port: 24224/tcp
+- service: Fluentd td-agent udp
+ port: 24224/udp
os_firewall_deny:
- service: OpenShift api http
port: 8080/tcp
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index f9e6199a5..23f8b4649 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -1,10 +1,16 @@
---
-# TODO: actually have api_port, api_use_ssl, console_port, console_use_ssl,
-# etcd_use_ssl actually change the master config.
+# TODO: add validation for openshift_master_identity_providers
+# TODO: add ability to configure certificates given either a local file to
+# point to or certificate contents, set in default cert locations.
+
+- assert:
+ that:
+ - openshift_master_oauth_grant_method in openshift_master_valid_grant_methods
+ when: openshift_master_oauth_grant_method is defined
- name: Set master OpenShift facts
openshift_facts:
- role: 'master'
+ role: master
local_facts:
debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
@@ -18,66 +24,104 @@
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
+ etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}"
+ embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
+ embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
+ embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
+ dns_port: "{{ openshift_master_dns_port | default(None) }}"
+ bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
+ session_name: "{{ openshift_master_session_name | default(None) }}"
+ session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
+ access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
+ auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
+ identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+ registry_url: "{{ oreg_url | default(None) }}"
+ oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
+ sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
+ sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
# TODO: These values need to be configurable
- name: Set dns OpenShift facts
openshift_facts:
- role: 'dns'
+ role: dns
local_facts:
ip: "{{ openshift.common.ip }}"
- domain: local
+ domain: cluster.local
+ when: openshift.master.embedded_dns
- name: Install OpenShift Master package
- yum: pkg=openshift-master state=installed
+ yum: pkg=openshift-master state=present
register: install_result
- name: Reload systemd units
command: systemctl daemon-reload
when: install_result | changed
-- name: Create certificate parent directory if it doesn't exist
- file:
- path: "{{ openshift_cert_parent_dir }}"
- state: directory
-
- name: Create config parent directory if it doesn't exist
file:
- path: "{{ openshift_master_config | dirname }}"
+ path: "{{ openshift_master_config_dir }}"
state: directory
-# TODO: should probably use a template lookup for this
-# TODO: should allow for setting --etcd, --kubernetes options
-# TODO: recreate config if values change
-- name: Use enterprise default for openshift_registry_url if not set
- set_fact:
- openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
- when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
-
-- name: Use online default for openshift_registry_url if not set
- set_fact:
- openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
- when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+- name: Create the master certificates if they do not already exist
+ command: >
+ {{ openshift.common.admin_binary }} create-master-certs
+ --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --cert-dir={{ openshift_master_config_dir }} --overwrite=false
+ args:
+ creates: "{{ openshift_master_config_dir }}/master.server.key"
-- name: Create master config
+- name: Create the policy file if it does not already exist
command: >
- /usr/bin/openshift start master --write-config
- --config={{ openshift_master_config }}
- --portal-net={{ openshift.master.portal_net }}
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
- --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }}
- {{ ('--images=' ~ openshift_registry_url) if (openshift_registry_url | default('', true) != '') else '' }}
- {{ ('--nodes=' ~ openshift_node_ips | join(',')) if (openshift_node_ips | default('', true) != '') else '' }}
+ {{ openshift.common.admin_binary }} create-bootstrap-policy-file
+ --filename={{ openshift_master_policy }}
args:
- chdir: "{{ openshift_cert_parent_dir }}"
- creates: "{{ openshift_master_config }}"
+ creates: "{{ openshift_master_policy }}"
+ notify:
+ - restart openshift-master
+
+- name: Create the scheduler config
+ template:
+ dest: "{{ openshift_master_scheduler_conf }}"
+ src: scheduler.json.j2
+ notify:
+ - restart openshift-master
+
+- name: Install httpd-tools if needed
+ yum: pkg=httpd-tools state=present
+ when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ with_items: openshift.master.identity_providers
+
+- name: Create the htpasswd file if needed
+ copy:
+ dest: "{{ item.filename }}"
+ content: ""
+ mode: 0600
+ force: no
+ when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ with_items: openshift.master.identity_providers
+
+# TODO: add the validate parameter when there is a validation command to run
+- name: Create master config
+ template:
+ dest: "{{ openshift_master_config_file }}"
+ src: master.yaml.v1.j2
+ notify:
+ - restart openshift-master
- name: Configure OpenShift settings
lineinfile:
dest: /etc/sysconfig/openshift-master
- regexp: '^OPTIONS='
- line: "OPTIONS=\"--config={{ openshift_master_config }} --loglevel={{ openshift.master.debug_level }}\""
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
notify:
- restart openshift-master
@@ -97,15 +141,15 @@
# TODO: Update this file if the contents of the source file are not present in
# the dest file, will need to make sure to ignore things that could be added
-- name: Create the OpenShift client config(s)
- command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig ~{{ item }}/.config/openshift/.config
+- name: Copy the OpenShift admin client config(s)
+ command: cp {{ openshift_master_config_dir }}/admin.kubeconfig ~{{ item }}/.config/openshift/.config
args:
creates: ~{{ item }}/.config/openshift/.config
with_items:
- root
- "{{ ansible_ssh_user }}"
-- name: Update the permissions on the OpenShift client config(s)
+- name: Update the permissions on the OpenShift admin client config(s)
file:
path: "~{{ item }}/.config/openshift/.config"
state: file
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
new file mode 100644
index 000000000..1c2d37b63
--- /dev/null
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -0,0 +1,98 @@
+apiVersion: v1
+assetConfig:
+ logoutURL: ""
+ masterPublicURL: {{ openshift.master.public_api_url }}
+ publicURL: {{ openshift.master.public_console_url }}/
+ servingInfo:
+ bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
+ certFile: master.server.crt
+ clientCA: ""
+ keyFile: master.server.key
+corsAllowedOrigins:
+{# TODO: add support for user specified corsAllowedOrigins #}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
+ - {{ origin }}
+{% endfor %}
+{% if openshift.master.embedded_dns %}
+dnsConfig:
+ bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
+{% endif %}
+etcdClientInfo:
+ ca: ca.crt
+ certFile: master.etcd-client.crt
+ keyFile: master.etcd-client.key
+ urls:
+{% for etcd_url in openshift.master.etcd_urls %}
+ - {{ etcd_url }}
+{% endfor %}
+{% if openshift.master.embedded_etcd %}
+etcdConfig:
+ address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }}
+ peerAddress: {{ openshift.common.hostname }}:7001
+ peerServingInfo:
+ bindAddress: {{ openshift.master.bind_addr }}:7001
+ certFile: etcd.server.crt
+ clientCA: ca.crt
+ keyFile: etcd.server.key
+ servingInfo:
+ bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }}
+ certFile: etcd.server.crt
+ clientCA: ca.crt
+ keyFile: etcd.server.key
+ storageDirectory: {{ openshift_data_dir }}/openshift.local.etcd
+{% endif %}
+etcdStorageConfig:
+ kubernetesStoragePrefix: kubernetes.io
+ kubernetesStorageVersion: v1beta3
+ kubernetesStoragePrefix: kubernetes.io
+ openShiftStorageVersion: v1beta3
+imageConfig:
+ format: {{ openshift.master.registry_url }}
+ latest: false
+kind: MasterConfig
+kubeletClientInfo:
+{# TODO: allow user specified kubelet port #}
+ ca: ca.crt
+ certFile: master.kubelet-client.crt
+ keyFile: master.kubelet-client.key
+ port: 10250
+{% if openshift.master.embedded_kube %}
+kubernetesMasterConfig:
+{# TODO: support overriding masterCount #}
+ masterCount: 1
+ masterIP: ""
+ schedulerConfigFile: {{ openshift_master_scheduler_conf }}
+ servicesSubnet: {{ openshift.master.portal_net }}
+ staticNodeNames: {{ openshift_node_ips | default([], true) }}
+{% endif %}
+masterClients:
+{# TODO: allow user to set externalKubernetesKubeConfig #}
+ deployerKubeConfig: openshift-deployer.kubeconfig
+ externalKubernetesKubeConfig: ""
+ openshiftLoopbackKubeConfig: openshift-client.kubeconfig
+masterPublicURL: {{ openshift.master.public_api_url }}
+networkConfig:
+ clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
+ hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% include 'v1_partials/oauthConfig.j2' %}
+policyConfig:
+ bootstrapPolicyFile: {{ openshift_master_policy }}
+ openshiftSharedResourcesNamespace: openshift
+{# TODO: Allow users to override projectConfig items #}
+projectConfig:
+ defaultNodeSelector: ""
+ projectRequestMessage: ""
+ projectRequestTemplate: ""
+serviceAccountConfig:
+ managedNames:
+ - default
+ - builder
+ privateKeyFile: serviceaccounts.private.key
+ publicKeyFiles:
+ - serviceaccounts.public.key
+servingInfo:
+ bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
+ certFile: master.server.crt
+ clientCA: ca.crt
+ keyFile: master.server.key
diff --git a/roles/openshift_master/templates/scheduler.json.j2 b/roles/openshift_master/templates/scheduler.json.j2
new file mode 100644
index 000000000..833e7f3e1
--- /dev/null
+++ b/roles/openshift_master/templates/scheduler.json.j2
@@ -0,0 +1,12 @@
+{
+ "predicates": [
+ {"name": "PodFitsResources"},
+ {"name": "PodFitsPorts"},
+ {"name": "NoDiskConflict"},
+ {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
+ ],"priorities": [
+ {"name": "LeastRequestedPriority", "weight": 1},
+ {"name": "ServiceSpreadingPriority", "weight": 1},
+ {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
+ ]
+}
diff --git a/roles/openshift_master/templates/v1_partials/oauthConfig.j2 b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
new file mode 100644
index 000000000..f6fd88c65
--- /dev/null
+++ b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
@@ -0,0 +1,78 @@
+{% macro identity_provider_config(identity_provider) %}
+ apiVersion: v1
+ kind: {{ identity_provider.kind }}
+{% if identity_provider.kind == 'HTPasswdPasswordIdentityProvider' %}
+ file: {{ identity_provider.filename }}
+{% elif identity_provider.kind == 'BasicAuthPasswordIdentityProvider' %}
+ url: {{ identity_provider.url }}
+{% for key in ('ca', 'certFile', 'keyFile') %}
+{% if key in identity_provider %}
+ {{ key }}: {{ identity_provider[key] }}"
+{% endif %}
+{% endfor %}
+{% elif identity_provider.kind == 'RequestHeaderIdentityProvider' %}
+ headers: {{ identity_provider.headers }}
+{% if 'clientCA' in identity_provider %}
+ clientCA: {{ identity_provider.clientCA }}
+{% endif %}
+{% elif identity_provider.kind == 'GitHubIdentityProvider' %}
+ clientID: {{ identity_provider.clientID }}
+ clientSecret: {{ identity_provider.clientSecret }}
+{% elif identity_provider.kind == 'GoogleIdentityProvider' %}
+ clientID: {{ identity_provider.clientID }}
+ clientSecret: {{ identity_provider.clientSecret }}
+{% if 'hostedDomain' in identity_provider %}
+ hostedDomain: {{ identity_provider.hostedDomain }}
+{% endif %}
+{% elif identity_provider.kind == 'OpenIDIdentityProvider' %}
+ clientID: {{ identity_provider.clientID }}
+ clientSecret: {{ identity_provider.clientSecret }}
+ claims:
+ id: identity_provider.claims.id
+{% for claim_key in ('preferredUsername', 'name', 'email') %}
+{% if claim_key in identity_provider.claims %}
+ {{ claim_key }}: {{ identity_provider.claims[claim_key] }}
+{% endif %}
+{% endfor %}
+ urls:
+ authorize: {{ identity_provider.urls.authorize }}
+ token: {{ identity_provider.urls.token }}
+{% if 'userInfo' in identity_provider.urls %}
+ userInfo: {{ identity_provider.userInfo }}
+{% endif %}
+{% if 'extraScopes' in identity_provider %}
+ extraScopes:
+{% for scope in identity_provider.extraScopes %}
+ - {{ scope }}
+{% endfor %}
+{% endif %}
+{% if 'extraAuthorizeParameters' in identity_provider %}
+ extraAuthorizeParameters:
+{% for param_key, param_value in identity_provider.extraAuthorizeParameters.iteritems() %}
+ {{ param_key }}: {{ param_value }}
+{% endfor %}
+{% endif %}
+{% endif %}
+{% endmacro %}
+oauthConfig:
+ assetPublicURL: {{ openshift.master.public_console_url }}/
+ grantConfig:
+ method: {{ openshift.master.oauth_grant_method }}
+ identityProviders:
+{% for identity_provider in openshift.master.identity_providers %}
+ - name: {{ identity_provider.name }}
+ challenge: {{ identity_provider.challenge }}
+ login: {{ identity_provider.login }}
+ provider:
+{{ identity_provider_config(identity_provider) }}
+{%- endfor %}
+ masterPublicURL: {{ openshift.master.public_api_url }}
+ masterURL: {{ openshift.master.api_url }}
+ sessionConfig:
+ sessionMaxAgeSeconds: {{ openshift.master.session_max_seconds }}
+ sessionName: {{ openshift.master.session_name }}
+ sessionSecretsFile: {{ openshift.master.session_secrets_file }}
+ tokenConfig:
+ accessTokenMaxAgeSeconds: {{ openshift.master.access_token_max_seconds }}
+ authorizeTokenMaxAgeSeconds: {{ openshift.master.auth_token_max_seconds }}
+{# Comment to preserve newline after authorizeTokenMaxAgeSeconds #}
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index c52d957ac..f6f69966a 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -1,5 +1,10 @@
---
-openshift_master_config: /etc/openshift/master.yaml
-openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
-openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
-openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
+openshift_master_config_dir: /etc/openshift/master
+openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
+openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
+
+openshift_master_valid_grant_methods:
+- auto
+- prompt
+- deny
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 83359f164..c3c17b848 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -17,7 +17,7 @@ From this role:
| Name | Default value | |
|------------------------------------------|-----------------------|----------------------------------------|
| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node |
-| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
+| oreg_url | UNDEF (Optional) | Default docker registry to use |
From openshift_common:
| Name | Default Value | |
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index df7ec41b6..be51195f2 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -2,3 +2,7 @@
os_firewall_allow:
- service: OpenShift kubelet
port: 10250/tcp
+- service: http
+ port: 80/tcp
+- service: https
+ port: 443/tcp
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index ca2992637..953a1421b 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,4 +1,3 @@
---
- name: restart openshift-node
service: name=openshift-node state=restarted
- when: not openshift.common.use_openshift_sdn|bool
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 3d56bdd67..c5202650f 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,50 +1,63 @@
---
# TODO: allow for overriding default ports where possible
-# TODO: trigger the external service when restart is needed
- name: Set node OpenShift facts
openshift_facts:
- role: 'node'
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(none) }}"
+ public_hostname: "{{ openshift_public_hostname | default(none) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+ - role: node
local_facts:
+ resources_cpu: "{{ openshift_node_resources_cpu | default(none) }}"
+ resources_memory: "{{ openshift_node_resources_memory | default(none) }}"
+ pod_cidr: "{{ openshift_node_pod_cidr | default(none) }}"
+ labels: "{{ openshift_node_labels | default(none) }}"
+ annotations: "{{ openshift_node_annotations | default(none) }}"
+ registry_url: "{{ oreg_url | default(none) }}"
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
-- name: Test if node certs and config exist
- stat: path={{ item }}
- failed_when: not result.stat.exists
- register: result
- with_items:
- - "{{ openshift_node_cert_dir }}"
- - "{{ openshift_node_cert_dir }}/ca.crt"
- - "{{ openshift_node_cert_dir }}/client.crt"
- - "{{ openshift_node_cert_dir }}/client.key"
- - "{{ openshift_node_cert_dir }}/.kubeconfig"
- - "{{ openshift_node_cert_dir }}/node-config.yaml"
- - "{{ openshift_node_cert_dir }}/server.crt"
- - "{{ openshift_node_cert_dir }}/server.key"
-
- name: Install OpenShift Node package
- yum: pkg=openshift-node state=installed
- register: install_result
+ yum: pkg=openshift-node state=present
+ register: node_install_result
+
+- name: Install openshift-sdn-ovs
+ yum: pkg=openshift-sdn-ovs state=present
+ register: sdn_install_result
+ when: openshift.common.use_openshift_sdn
- name: Reload systemd units
command: systemctl daemon-reload
- when: install_result | changed
+ when: (node_install_result | changed or (openshift.common.use_openshift_sdn
+ and sdn_install_result | changed))
+
+# TODO: add the validate parameter when there is a validation command to run
+- name: Create the Node config
+ template:
+ dest: "{{ openshift_node_config_file }}"
+ src: node.yaml.v1.j2
+ notify:
+ - restart openshift-node
-# --create-certs=false is a temporary workaround until
-# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
-# the default for nodes
- name: Configure OpenShift Node settings
lineinfile:
dest: /etc/sysconfig/openshift-node
- regexp: '^OPTIONS='
- line: "OPTIONS=\"--loglevel={{ openshift.node.debug_level }} --config={{ openshift_node_cert_dir }}/node-config.yaml\""
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.node.debug_level }}"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_node_config_file }}"
notify:
- restart openshift-node
+- name: Allow NFS access for VMs
+ seboolean: name=virt_use_nfs state=yes persistent=yes
+
- name: Start and enable openshift-node
service: name=openshift-node enabled=yes state=started
- when: not openshift.common.use_openshift_sdn|bool
-
-- name: Disable openshift-node if openshift-node is managed externally
- service: name=openshift-node enabled=false
- when: openshift.common.use_openshift_sdn|bool
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
new file mode 100644
index 000000000..cab75cd49
--- /dev/null
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -0,0 +1,18 @@
+allowDisabledDocker: false
+apiVersion: v1
+dnsDomain: {{ hostvars[openshift_first_master].openshift.dns.domain }}
+dnsIP: {{ hostvars[openshift_first_master].openshift.dns.ip }}
+imageConfig:
+ format: {{ openshift.node.registry_url }}
+ latest: false
+kind: NodeConfig
+masterKubeConfig: node.kubeconfig
+networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+nodeName: {{ openshift.common.hostname }}
+podManifestConfig: null
+servingInfo:
+ bindAddress: 0.0.0.0:10250
+ certFile: server.crt
+ clientCA: ca.crt
+ keyFile: server.key
+volumeDirectory: {{ openshift_data_dir }}/openshift.local.volumes
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
index c6be83139..cf47f8354 100644
--- a/roles/openshift_node/vars/main.yml
+++ b/roles/openshift_node/vars/main.yml
@@ -1,2 +1,3 @@
---
-openshift_node_cert_dir: /etc/openshift/node
+openshift_node_config_dir: /etc/openshift/node
+openshift_node_config_file: "{{ openshift_node_config_dir }}/node-config.yaml"
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml
deleted file mode 100644
index a0befab44..000000000
--- a/roles/openshift_register_nodes/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_kube_api_version: v1beta1
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
index afa9eb27d..a8c38627b 100755
--- a/roles/openshift_register_nodes/library/kubernetes_register_node.py
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -3,15 +3,13 @@
# vim: expandtab:tabstop=4:shiftwidth=4
#
# disable pylint checks
-# temporarily disabled until items can be addressed:
-# fixme - until all TODO comments have been addressed
# permanently disabled unless someone wants to refactor the object model:
# too-few-public-methods
# no-self-use
# too-many-arguments
# too-many-locals
# too-many-branches
-# pylint:disable=fixme, too-many-arguments, no-self-use
+# pylint:disable=too-many-arguments, no-self-use
# pylint:disable=too-many-locals, too-many-branches, too-few-public-methods
"""Ansible module to register a kubernetes node to the cluster"""
@@ -41,24 +39,6 @@ options:
- IP Address to associate with the node when registering.
Available in the following API versions: v1beta1.
required: false
- hostnames:
- default: []
- description:
- - Valid hostnames for this node. Available in the following API
- versions: v1beta3.
- required: false
- external_ips:
- default: []
- description:
- - External IP Addresses for this node. Available in the following API
- versions: v1beta3.
- required: false
- internal_ips:
- default: []
- description:
- - Internal IP Addresses for this node. Available in the following API
- versions: v1beta3.
- required: false
cpu:
default: null
description:
@@ -87,17 +67,6 @@ EXAMPLES = '''
hostIP: 192.168.1.1
cpu: 1
memory: 500000000
-
-# Node registration using the v1beta3 API, setting an alternate hostname,
-# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory
-- openshift_register_node:
- name: ose3.node.example.com
- api_version: v1beta3
- external_ips: ['192.168.1.5']
- internal_ips: ['10.0.0.5']
- hostnames: ['ose2.node.internal.local']
- cpu: 3.5
- memory: 1Ti
'''
@@ -313,57 +282,11 @@ class NodeSpec(object):
"""
return Util.remove_empty_elements(self.spec)
-class NodeStatus(object):
- """ Kubernetes Node Status
-
- Attributes:
- status (dict): A dictionary representing the node status
-
- Args:
- version (str): kubernetes api version
- externalIPs (list, optional): externalIPs for the node
- internalIPs (list, optional): internalIPs for the node
- hostnames (list, optional): hostnames for the node
- """
- def add_addresses(self, address_type, addresses):
- """ Adds addresses of the specified type
-
- Args:
- address_type (str): address type
- addresses (list): addresses to add
- """
- address_list = []
- for address in addresses:
- address_list.append(dict(type=address_type, address=address))
- return address_list
-
- def __init__(self, version, externalIPs=None, internalIPs=None,
- hostnames=None):
- if version == 'v1beta3':
- addresses = []
- if externalIPs is not None:
- addresses += self.add_addresses('ExternalIP', externalIPs)
- if internalIPs is not None:
- addresses += self.add_addresses('InternalIP', internalIPs)
- if hostnames is not None:
- addresses += self.add_addresses('Hostname', hostnames)
-
- self.status = dict(addresses=addresses)
-
- def get_status(self):
- """ Get the dict representing the node status
-
- Returns:
- dict: representation of the node status with any empty elements
- removed
- """
- return Util.remove_empty_elements(self.status)
-
class Node(object):
""" Kubernetes Node
Attributes:
- status (dict): A dictionary representing the node
+ node (dict): A dictionary representing the node
Args:
module (AnsibleModule):
@@ -371,9 +294,6 @@ class Node(object):
version (str, optional): kubernetes api version
node_name (str, optional): name for node
hostIP (str, optional): node host ip
- hostnames (list, optional): hostnames for the node
- externalIPs (list, optional): externalIPs for the node
- internalIPs (list, optional): internalIPs for the node
cpu (str, optional): cpu resources for the node
memory (str, optional): memory resources for the node
labels (list, optional): labels for the node
@@ -382,8 +302,7 @@ class Node(object):
externalID (str, optional): external id of the node
"""
def __init__(self, module, client_opts, version='v1beta1', node_name=None,
- hostIP=None, hostnames=None, externalIPs=None,
- internalIPs=None, cpu=None, memory=None, labels=None,
+ hostIP=None, cpu=None, memory=None, labels=None,
annotations=None, podCIDR=None, externalID=None):
self.module = module
self.client_opts = client_opts
@@ -405,9 +324,7 @@ class Node(object):
apiVersion=version,
metadata=metadata,
spec=NodeSpec(version, cpu, memory, podCIDR,
- externalID),
- status=NodeStatus(version, externalIPs,
- internalIPs, hostnames))
+ externalID))
def get_name(self):
""" Get the name for the node
@@ -418,7 +335,7 @@ class Node(object):
if self.node['apiVersion'] == 'v1beta1':
return self.node['id']
elif self.node['apiVersion'] == 'v1beta3':
- return self.node['name']
+ return self.node['metadata']['name']
def get_node(self):
""" Get the dict representing the node
@@ -432,7 +349,6 @@ class Node(object):
node['resources'] = self.node['resources'].get_resources()
elif self.node['apiVersion'] == 'v1beta3':
node['spec'] = self.node['spec'].get_spec()
- node['status'] = self.node['status'].get_status()
return Util.remove_empty_elements(node)
def exists(self):
@@ -473,52 +389,15 @@ class Node(object):
else:
return True
-def main():
- """ main """
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True, type='str'),
- host_ip=dict(type='str'),
- hostnames=dict(type='list', default=[]),
- external_ips=dict(type='list', default=[]),
- internal_ips=dict(type='list', default=[]),
- api_version=dict(type='str', default='v1beta1',
- choices=['v1beta1', 'v1beta3']),
- cpu=dict(type='str'),
- memory=dict(type='str'),
- # TODO: needs documented
- labels=dict(type='dict', default={}),
- # TODO: needs documented
- annotations=dict(type='dict', default={}),
- # TODO: needs documented
- pod_cidr=dict(type='str'),
- # TODO: needs documented
- external_id=dict(type='str'),
- # TODO: needs documented
- client_config=dict(type='str'),
- # TODO: needs documented
- client_cluster=dict(type='str', default='master'),
- # TODO: needs documented
- client_context=dict(type='str', default='default'),
- # TODO: needs documented
- client_namespace=dict(type='str', default='default'),
- # TODO: needs documented
- client_user=dict(type='str', default='system:openshift-client'),
- # TODO: needs documented
- kubectl_cmd=dict(type='list', default=['kubectl']),
- # TODO: needs documented
- kubeconfig_flag=dict(type='str'),
- # TODO: needs documented
- default_client_config=dict(type='str')
- ),
- mutually_exclusive=[
- ['host_ip', 'external_ips'],
- ['host_ip', 'internal_ips'],
- ['host_ip', 'hostnames'],
- ],
- supports_check_mode=True
- )
+def generate_client_opts(module):
+ """ Generates the client options
+ Args:
+ module(AnsibleModule)
+
+ Returns:
+ str: client options
+ """
client_config = '~/.kube/.kubeconfig'
if 'default_client_config' in module.params:
client_config = module.params['default_client_config']
@@ -533,8 +412,7 @@ def main():
kubeconfig_flag = '--kubeconfig'
if 'kubeconfig_flag' in module.params:
kubeconfig_flag = module.params['kubeconfig_flag']
- client_opts.append(kubeconfig_flag + '=' +
- os.path.expanduser(module.params['client_config']))
+ client_opts.append(kubeconfig_flag + '=' + os.path.expanduser(module.params['client_config']))
try:
config = ClientConfig(client_opts, module)
@@ -547,51 +425,85 @@ def main():
if client_context != config.current_context():
client_opts.append("--context=%s" % client_context)
else:
- module.fail_json(msg="Context %s not found in client config" %
- client_context)
+ module.fail_json(msg="Context %s not found in client config" % client_context)
client_user = module.params['client_user']
if config.has_user(client_user):
if client_user != config.get_user_for_context(client_context):
client_opts.append("--user=%s" % client_user)
else:
- module.fail_json(msg="User %s not found in client config" %
- client_user)
+ module.fail_json(msg="User %s not found in client config" % client_user)
client_cluster = module.params['client_cluster']
if config.has_cluster(client_cluster):
if client_cluster != config.get_cluster_for_context(client_context):
client_opts.append("--cluster=%s" % client_cluster)
else:
- module.fail_json(msg="Cluster %s not found in client config" %
- client_cluster)
+ module.fail_json(msg="Cluster %s not found in client config" % client_cluster)
client_namespace = module.params['client_namespace']
if client_namespace != config.get_namespace_for_context(client_context):
client_opts.append("--namespace=%s" % client_namespace)
- node = Node(module, client_opts, module.params['api_version'],
- module.params['name'], module.params['host_ip'],
- module.params['hostnames'], module.params['external_ips'],
- module.params['internal_ips'], module.params['cpu'],
- module.params['memory'], module.params['labels'],
- module.params['annotations'], module.params['pod_cidr'],
- module.params['external_id'])
+ return client_opts
+
+
+def main():
+ """ main """
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str'),
+ host_ip=dict(type='str'),
+ api_version=dict(type='str', default='v1beta1',
+ choices=['v1beta1', 'v1beta3']),
+ cpu=dict(type='str'),
+ memory=dict(type='str'),
+ # TODO: needs documented
+ labels=dict(type='dict', default={}),
+ # TODO: needs documented
+ annotations=dict(type='dict', default={}),
+ # TODO: needs documented
+ pod_cidr=dict(type='str'),
+ # TODO: needs documented
+ client_config=dict(type='str'),
+ # TODO: needs documented
+ client_cluster=dict(type='str', default='master'),
+ # TODO: needs documented
+ client_context=dict(type='str', default='default'),
+ # TODO: needs documented
+ client_namespace=dict(type='str', default='default'),
+ # TODO: needs documented
+ client_user=dict(type='str', default='system:admin'),
+ # TODO: needs documented
+ kubectl_cmd=dict(type='list', default=['kubectl']),
+ # TODO: needs documented
+ kubeconfig_flag=dict(type='str'),
+ # TODO: needs documented
+ default_client_config=dict(type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ labels = module.params['labels']
+ kube_hostname_label = 'kubernetes.io/hostname'
+ if kube_hostname_label not in labels:
+ labels[kube_hostname_label] = module.params['name']
+
+ node = Node(module, generate_client_opts(module),
+ module.params['api_version'], module.params['name'],
+ module.params['host_ip'], module.params['cpu'],
+ module.params['memory'], labels, module.params['annotations'],
+ module.params['pod_cidr'])
- # TODO: attempt to support changing node settings where possible and/or
- # modifying node resources
if node.exists():
module.exit_json(changed=False, node=node.get_node())
elif module.check_mode:
module.exit_json(changed=True, node=node.get_node())
+ elif node.create():
+ module.exit_json(changed=True, msg="Node created successfully",
+ node=node.get_node())
else:
- if node.create():
- module.exit_json(changed=True,
- msg="Node created successfully",
- node=node.get_node())
- else:
- module.fail_json(msg="Unknown error creating node",
- node=node.get_node())
+ module.fail_json(msg="Unknown error creating node", node=node.get_node())
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
index d4d72d126..11097a7cf 100644
--- a/roles/openshift_register_nodes/tasks/main.yml
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -1,45 +1,42 @@
---
-# TODO: support new create-config command to generate node certs and config
-# TODO: recreate master/node configs if settings that affect the configs
-# change (hostname, public_hostname, ip, public_ip, etc)
+- name: Create openshift_generated_configs_dir if it doesn't exist
+ file:
+ path: "{{ openshift_generated_configs_dir }}"
+ state: directory
-
-# TODO: use a template lookup here
-# TODO: create a failed_when condition
-- name: Use enterprise default for openshift_registry_url if not set
- set_fact:
- openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
- when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
-
-- name: Use online default for openshift_registry_url if not set
- set_fact:
- openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
- when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
-
-- name: Create node config
+- name: Generate the node client config
command: >
- /usr/bin/openshift admin create-node-config
- --node-dir={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}
- --node={{ item.openshift.common.hostname }}
- --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
- --dns-domain={{ openshift.dns.domain }}
- --dns-ip={{ openshift.dns.ip }}
+ {{ openshift.common.admin_binary }} create-api-client-config
+ --certificate-authority={{ openshift_master_ca_cert }}
+ --client-dir={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}
+ --groups=system:nodes
--master={{ openshift.master.api_url }}
+ --signer-cert={{ openshift_master_ca_cert }}
--signer-key={{ openshift_master_ca_key }}
+ --signer-serial={{ openshift_master_ca_serial }}
+ --user=system:node-{{ item.openshift.common.hostname }}
+ args:
+ chdir: "{{ openshift_generated_configs_dir }}"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
+ with_items: nodes_needing_certs
+
+- name: Generate the node server certificate
+ delegate_to: "{{ openshift_first_master }}"
+ command: >
+ {{ openshift.common.admin_binary }} create-server-cert
+ --cert=server.crt --key=server.key --overwrite=true
+ --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
--signer-cert={{ openshift_master_ca_cert }}
- --certificate-authority={{ openshift_master_ca_cert }}
- --signer-serial={{ openshift_master_ca_dir }}/serial.txt
- --node-client-certificate-authority={{ openshift_master_ca_cert }}
- {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }}
- --listen=https://0.0.0.0:10250
+ --signer-key={{ openshift_master_ca_key }}
+ --signer-serial={{ openshift_master_ca_serial }}
args:
- chdir: "{{ openshift_cert_parent_dir }}"
- creates: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
- with_items: openshift_nodes
+ chdir: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt"
+ with_items: nodes_needing_certs
- name: Register unregistered nodes
kubernetes_register_node:
- kubectl_cmd: ['osc']
+ kubectl_cmd: "{{ [openshift.common.client_binary] }}"
default_client_config: '~/.config/openshift/.config'
name: "{{ item.openshift.common.hostname }}"
api_version: "{{ openshift_kube_api_version }}"
@@ -49,8 +46,5 @@
host_ip: "{{ item.openshift.common.ip }}"
labels: "{{ item.openshift.node.labels | default({}) }}"
annotations: "{{ item.openshift.node.annotations | default({}) }}"
- external_id: "{{ item.openshift.node.external_id }}"
- # TODO: support customizing other attributes such as: client_config,
- # client_cluster, client_context, client_user
with_items: openshift_nodes
register: register_result
diff --git a/roles/openshift_register_nodes/vars/main.yml b/roles/openshift_register_nodes/vars/main.yml
index bd497f08f..3801b8427 100644
--- a/roles/openshift_register_nodes/vars/main.yml
+++ b/roles/openshift_register_nodes/vars/main.yml
@@ -1,7 +1,8 @@
---
-openshift_cert_parent_dir: /var/lib/openshift
-openshift_cert_relative_dir: openshift.local.certificates
-openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
-openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
-openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
-openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
+openshift_node_config_dir: /etc/openshift/node
+openshift_master_config_dir: /etc/openshift/master
+openshift_generated_configs_dir: /etc/openshift/generated-configs
+openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
+openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
+openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+openshift_kube_api_version: v1beta3
diff --git a/roles/openshift_registry/README.md b/roles/openshift_registry/README.md
new file mode 100644
index 000000000..202c818b8
--- /dev/null
+++ b/roles/openshift_registry/README.md
@@ -0,0 +1,42 @@
+OpenShift Container Docker Registry
+===================================
+
+OpenShift Docker Registry service installation
+
+Requirements
+------------
+
+Running OpenShift cluster
+
+Role Variables
+--------------
+
+From this role:
+| Name | Default value | |
+|--------------------|-------------------------------------------------------|---------------------|
+| | | |
+
+From openshift_common:
+| Name | Default value | |
+|-----------------------|---------------|--------------------------------------|
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+
+
+Dependencies
+------------
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Red Hat openshift@redhat.com
+
diff --git a/roles/openshift_registry/handlers/main.yml b/roles/openshift_registry/handlers/main.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_registry/handlers/main.yml
diff --git a/roles/openshift_sdn_node/meta/main.yml b/roles/openshift_registry/meta/main.yml
index ffe10f836..93b6797d1 100644
--- a/roles/openshift_sdn_node/meta/main.yml
+++ b/roles/openshift_registry/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
- author: Jason DeTiberus
- description: OpenShift SDN Node
+ author: OpenShift Red Hat
+ description: OpenShift Embedded Docker Registry
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 1.7
@@ -11,5 +11,3 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- { role: openshift_common }
diff --git a/roles/openshift_registry/tasks/main.yml b/roles/openshift_registry/tasks/main.yml
new file mode 100644
index 000000000..29387d7d5
--- /dev/null
+++ b/roles/openshift_registry/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: _oreg_images="--images={{ oreg_url|quote }}"
+ when: oreg_url is defined
+
+- name: Deploy OpenShift Registry
+ command: >
+ {{ openshift.common.admin_binary }} registry
+ --create
+ --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ _oreg_images|default() }}
+ register: _oreg_results
+ changed_when: "'service exists' not in _oreg_results.stdout"
diff --git a/roles/openshift_registry/vars/main.yml b/roles/openshift_registry/vars/main.yml
new file mode 100644
index 000000000..9fb501e85
--- /dev/null
+++ b/roles/openshift_registry/vars/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_master_config_dir: /etc/openshift/master
+
diff --git a/roles/openshift_repos/files/online/repos/enterprise-v3.repo b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
index d324c142a..69c480f0a 100644
--- a/roles/openshift_repos/files/online/repos/enterprise-v3.repo
+++ b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
@@ -1,10 +1,10 @@
[enterprise-v3]
-name=OpenShift Enterprise Beta3
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/libra-7-ose-beta3/
- https://mirror.ops.rhcloud.com/libra/libra-7-ose-beta3/
+name=OpenShift Enterprise Beta4
+baseurl=https://mirror.ops.rhcloud.com/libra/libra-7-ose-beta4/
+ https://gce-mirror1.ops.rhcloud.com/libra/libra-7-ose-beta4/
enabled=1
gpgcheck=0
failovermethod=priority
sslverify=False
sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem \ No newline at end of file
+sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/openshift_sdn_master/README.md b/roles/openshift_router/README.md
index d0dcf6d11..6d8ee25c6 100644
--- a/roles/openshift_sdn_master/README.md
+++ b/roles/openshift_router/README.md
@@ -1,20 +1,20 @@
-OpenShift SDN Master
-====================
+OpenShift Container Router
+==========================
-OpenShift SDN Master service installation
+OpenShift Router service installation
Requirements
------------
-A host with the openshift_master role applied
+Running OpenShift cluster
Role Variables
--------------
From this role:
-| Name | Default value | |
-|----------------------------------|-----------------------|--------------------------------------------------|
-| openshift_sdn_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
+| Name | Default value | |
+|--------------------|-------------------------------------------------------|---------------------|
+| | | |
From openshift_common:
| Name | Default value | |
@@ -24,7 +24,6 @@ From openshift_common:
Dependencies
------------
-
Example Playbook
----------------
@@ -38,4 +37,5 @@ Apache License, Version 2.0
Author Information
------------------
-TODO
+Red Hat openshift@redhat.com
+
diff --git a/roles/openshift_router/handlers/main.yml b/roles/openshift_router/handlers/main.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_router/handlers/main.yml
diff --git a/roles/openshift_sdn_master/meta/main.yml b/roles/openshift_router/meta/main.yml
index 5de32cc13..0471e5e14 100644
--- a/roles/openshift_sdn_master/meta/main.yml
+++ b/roles/openshift_router/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
- author: Jason DeTiberus
- description: OpenShift SDN Master
+ author: OpenShift Red Hat
+ description: OpenShift Embedded Router
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 1.7
@@ -11,5 +11,3 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- { role: openshift_common }
diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml
new file mode 100644
index 000000000..929177262
--- /dev/null
+++ b/roles/openshift_router/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: _ortr_images="--images={{ oreg_url|quote }}"
+ when: oreg_url is defined
+
+- name: Deploy OpenShift Router
+ command: >
+ {{ openshift.common.admin_binary }} router
+ --create
+ --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ _ortr_images|default() }}
+ register: _ortr_results
+ changed_when: "'service exists' not in _ortr_results.stdout"
diff --git a/roles/openshift_router/vars/main.yml b/roles/openshift_router/vars/main.yml
new file mode 100644
index 000000000..9fb501e85
--- /dev/null
+++ b/roles/openshift_router/vars/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_master_config_dir: /etc/openshift/master
+
diff --git a/roles/openshift_sdn_master/handlers/main.yml b/roles/openshift_sdn_master/handlers/main.yml
deleted file mode 100644
index cd645f2c5..000000000
--- a/roles/openshift_sdn_master/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart openshift-sdn-master
- service: name=openshift-sdn-master state=restarted
diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml
deleted file mode 100644
index 77e7a80ba..000000000
--- a/roles/openshift_sdn_master/tasks/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-# TODO: add task to set the sdn subnet if openshift-sdn-master hasn't been
-# started yet
-
-- name: Set master sdn OpenShift facts
- openshift_facts:
- role: 'master_sdn'
- local_facts:
- debug_level: "{{ openshift_master_sdn_debug_level | default(openshift.common.debug_level) }}"
-
-- name: Install openshift-sdn-master
- yum:
- pkg: openshift-sdn-master
- state: installed
- register: install_result
-
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: install_result | changed
-
-# TODO: we should probably generate certs specifically for sdn
-- name: Configure openshift-sdn-master settings
- lineinfile:
- dest: /etc/sysconfig/openshift-sdn-master
- regexp: '^OPTIONS='
- line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }} -etcd-endpoints={{ openshift_sdn_master_url}}
- -etcd-cafile={{ openshift_cert_dir }}/ca/ca.crt
- -etcd-certfile={{ openshift_cert_dir }}/openshift-client/cert.crt
- -etcd-keyfile={{ openshift_cert_dir }}/openshift-client/key.key\""
- notify:
- - restart openshift-sdn-master
-
-- name: Enable openshift-sdn-master
- service:
- name: openshift-sdn-master
- enabled: yes
- state: started
diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md
deleted file mode 100644
index e6b6a9503..000000000
--- a/roles/openshift_sdn_node/README.md
+++ /dev/null
@@ -1,44 +0,0 @@
-OpenShift SDN Node
-==================
-
-OpenShift SDN Node service installation
-
-Requirements
-------------
-
-A host with the openshift_node role applied
-
-Role Variables
---------------
-
-From this role:
-| Name | Default value | |
-|--------------------------------|-----------------------|--------------------------------------------------|
-| openshift_sdn_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
-
-
-From openshift_common:
-| Name | Default value | |
-|-------------------------------|---------------------|----------------------------------------|
-| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-TODO
diff --git a/roles/openshift_sdn_node/handlers/main.yml b/roles/openshift_sdn_node/handlers/main.yml
deleted file mode 100644
index 402d82149..000000000
--- a/roles/openshift_sdn_node/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart openshift-sdn-node
- service: name=openshift-sdn-node state=restarted
diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml
deleted file mode 100644
index 37a30d019..000000000
--- a/roles/openshift_sdn_node/tasks/main.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- name: Set node sdn OpenShift facts
- openshift_facts:
- role: 'node_sdn'
- local_facts:
- debug_level: "{{ openshift_node_sdn_debug_level | default(openshift.common.debug_level) }}"
-
-- name: Install openshift-sdn-node
- yum:
- pkg: openshift-sdn-node
- state: installed
- register: install_result
-
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: install_result | changed
-
-# TODO: we are specifying -hostname= for OPTIONS as a workaround for
-# openshift-sdn-node not properly detecting the hostname.
-# TODO: we should probably generate certs specifically for sdn
-- name: Configure openshift-sdn-node settings
- lineinfile:
- dest: /etc/sysconfig/openshift-sdn-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- backrefs: yes
- with_items:
- - regex: '^(OPTIONS=)'
- line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}
- -etcd-cafile={{ openshift_node_cert_dir }}/ca.crt
- -etcd-certfile={{ openshift_node_cert_dir }}/client.crt
- -etcd-keyfile={{ openshift_node_cert_dir }}/client.key\"'
- - regex: '^(MASTER_URL=)'
- line: '\1"{{ openshift_sdn_master_url }}"'
- - regex: '^(MINION_IP=)'
- line: '\1"{{ openshift.common.ip }}"'
- notify: restart openshift-sdn-node
-
-- name: Ensure we aren't setting DOCKER_OPTIONS in /etc/sysconfig/openshift-sdn-node
- lineinfile:
- dest: /etc/sysconfig/openshift-sdn-node
- regexp: '^DOCKER_OPTIONS='
- state: absent
- notify: restart openshift-sdn-node
-
-# TODO lock down the insecure-registry config to a more sane value than
-# 0.0.0.0/0
-- name: Configure docker insecure-registry setting
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: INSECURE_REGISTRY=
- line: INSECURE_REGISTRY='--insecure-registry=0.0.0.0/0'
- notify: restart openshift-sdn-node
-
-
-- name: Start and enable openshift-sdn-node
- service:
- name: openshift-sdn-node
- enabled: yes
- state: started
diff --git a/roles/os_zabbix/library/zbxapi.py b/roles/os_zabbix/library/zbxapi.py
index f4f52909b..b5fa5ee2b 100755
--- a/roles/os_zabbix/library/zbxapi.py
+++ b/roles/os_zabbix/library/zbxapi.py
@@ -1,4 +1,8 @@
#!/usr/bin/env python
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+ ZabbixAPI ansible module
+'''
# Copyright 2015 Red Hat Inc.
#
@@ -17,11 +21,22 @@
# Purpose: An ansible module to communicate with zabbix.
#
+# pylint: disable=line-too-long
+# Disabling line length for readability
+
import json
import httplib2
import sys
import os
import re
+import copy
+
+class ZabbixAPIError(Exception):
+ '''
+ ZabbixAPIError
+ Exists to propagate errors up from the api
+ '''
+ pass
class ZabbixAPI(object):
'''
@@ -69,23 +84,26 @@ class ZabbixAPI(object):
'Usermedia': ['get'],
}
- def __init__(self, data={}):
- self.server = data['server'] or None
- self.username = data['user'] or None
- self.password = data['password'] or None
- if any(map(lambda value: value == None, [self.server, self.username, self.password])):
+ def __init__(self, data=None):
+ if not data:
+ data = {}
+ self.server = data.get('server', None)
+ self.username = data.get('user', None)
+ self.password = data.get('password', None)
+ if any([value == None for value in [self.server, self.username, self.password]]):
print 'Please specify zabbix server url, username, and password.'
sys.exit(1)
- self.verbose = data.has_key('verbose')
+ self.verbose = data.get('verbose', False)
self.use_ssl = data.has_key('use_ssl')
self.auth = None
- for class_name, method_names in self.classes.items():
- #obj = getattr(self, class_name)(self)
- #obj.__dict__
- setattr(self, class_name.lower(), getattr(self, class_name)(self))
+ for cname, _ in self.classes.items():
+ setattr(self, cname.lower(), getattr(self, cname)(self))
+ # pylint: disable=no-member
+ # This method does not exist until the metaprogramming executed
+ # This is permanently disabled.
results = self.user.login(user=self.username, password=self.password)
if results[0]['status'] == '200':
@@ -98,48 +116,40 @@ class ZabbixAPI(object):
print "Error in call to zabbix. Http status: {0}.".format(results[0]['status'])
sys.exit(1)
- def perform(self, method, params):
+ def perform(self, method, rpc_params):
'''
This method calls your zabbix server.
It requires the following parameters in order for a proper request to be processed:
-
- jsonrpc - the version of the JSON-RPC protocol used by the API; the Zabbix API implements JSON-RPC version 2.0;
+ jsonrpc - the version of the JSON-RPC protocol used by the API;
+ the Zabbix API implements JSON-RPC version 2.0;
method - the API method being called;
- params - parameters that will be passed to the API method;
+ rpc_params - parameters that will be passed to the API method;
id - an arbitrary identifier of the request;
auth - a user authentication token; since we don't have one yet, it's set to null.
'''
http_method = "POST"
- if params.has_key("http_method"):
- http_method = params['http_method']
-
jsonrpc = "2.0"
- if params.has_key('jsonrpc'):
- jsonrpc = params['jsonrpc']
-
rid = 1
- if params.has_key('id'):
- rid = params['id']
http = None
if self.use_ssl:
http = httplib2.Http()
else:
- http = httplib2.Http( disable_ssl_certificate_validation=True,)
+ http = httplib2.Http(disable_ssl_certificate_validation=True,)
- headers = params.get('headers', {})
+ headers = {}
headers["Content-type"] = "application/json"
body = {
"jsonrpc": jsonrpc,
"method": method,
- "params": params,
+ "params": rpc_params.get('params', {}),
"id": rid,
'auth': self.auth,
}
- if method in ['user.login','api.version']:
+ if method in ['user.login', 'api.version']:
del body['auth']
body = json.dumps(body)
@@ -150,48 +160,70 @@ class ZabbixAPI(object):
print headers
httplib2.debuglevel = 1
- response, results = http.request(self.server, http_method, body, headers)
+ response, content = http.request(self.server, http_method, body, headers)
+
+ if response['status'] not in ['200', '201']:
+ raise ZabbixAPIError('Error calling zabbix. Zabbix returned %s' % response['status'])
if self.verbose:
print response
- print results
+ print content
try:
- results = json.loads(results)
- except ValueError as e:
- results = {"error": e.message}
+ content = json.loads(content)
+ except ValueError as err:
+ content = {"error": err.message}
- return response, results
+ return response, content
- '''
- This bit of metaprogramming is where the ZabbixAPI subclasses are created.
- For each of ZabbixAPI.classes we create a class from the key and methods
- from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class
- to each subclass in order for each to be able to call the perform method.
- '''
@staticmethod
- def meta(class_name, method_names):
- # This meta method allows a class to add methods to it.
- def meta_method(Class, method_name):
+ def meta(cname, method_names):
+ '''
+ This bit of metaprogramming is where the ZabbixAPI subclasses are created.
+ For each of ZabbixAPI.classes we create a class from the key and methods
+ from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class
+ to each subclass in order for each to be able to call the perform method.
+ '''
+ def meta_method(_class, method_name):
+ '''
+ This meta method allows a class to add methods to it.
+ '''
# This template method is a stub method for each of the subclass
# methods.
- def template_method(self, **params):
- return self.parent.perform(class_name.lower()+"."+method_name, params)
- template_method.__doc__ = "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % (class_name.lower(), method_name)
+ def template_method(self, params=None, **rpc_params):
+ '''
+ This template method is a stub method for each of the subclass methods.
+ '''
+ if params:
+ rpc_params['params'] = params
+ else:
+ rpc_params['params'] = copy.deepcopy(rpc_params)
+
+ return self.parent.perform(cname.lower()+"."+method_name, rpc_params)
+
+ template_method.__doc__ = \
+ "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % \
+ (cname.lower(), method_name)
template_method.__name__ = method_name
# this is where the template method is placed inside of the subclass
# e.g. setattr(User, "create", stub_method)
- setattr(Class, template_method.__name__, template_method)
+ setattr(_class, template_method.__name__, template_method)
# This class call instantiates a subclass. e.g. User
- Class=type(class_name, (object,), { '__doc__': "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % class_name.lower() })
- # This init method gets placed inside of the Class
- # to allow it to be instantiated. A reference to the parent class(ZabbixAPI)
- # is passed in to allow each class access to the perform method.
+ _class = type(cname,
+ (object,),
+ {'__doc__': \
+ "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % cname.lower()})
def __init__(self, parent):
+ '''
+ This init method gets placed inside of the _class
+ to allow it to be instantiated. A reference to the parent class(ZabbixAPI)
+ is passed in to allow each class access to the perform method.
+ '''
self.parent = parent
+
# This attaches the init to the subclass. e.g. Create
- setattr(Class, __init__.__name__, __init__)
+ setattr(_class, __init__.__name__, __init__)
# For each of our ZabbixAPI.classes dict values
# Create a method and attach it to our subclass.
# e.g. 'User': ['delete', 'get', 'updatemedia', 'updateprofile',
@@ -200,25 +232,54 @@ class ZabbixAPI(object):
# User.delete
# User.get
for method_name in method_names:
- meta_method(Class, method_name)
+ meta_method(_class, method_name)
# Return our subclass with all methods attached
- return Class
+ return _class
# Attach all ZabbixAPI.classes to ZabbixAPI class through metaprogramming
-for class_name, method_names in ZabbixAPI.classes.items():
- setattr(ZabbixAPI, class_name, ZabbixAPI.meta(class_name, method_names))
+for _class_name, _method_names in ZabbixAPI.classes.items():
+ setattr(ZabbixAPI, _class_name, ZabbixAPI.meta(_class_name, _method_names))
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def diff_content(from_zabbix, from_user):
+ ''' Compare passed in object to results returned from zabbix
+ '''
+ terms = ['search', 'output', 'groups', 'select', 'expand']
+ regex = '(' + '|'.join(terms) + ')'
+ retval = {}
+ for key, value in from_user.items():
+ if re.findall(regex, key):
+ continue
+
+ if from_zabbix[key] != str(value):
+ retval[key] = str(value)
+
+ return retval
def main():
+ '''
+ This main method runs the ZabbixAPI Ansible Module
+ '''
module = AnsibleModule(
- argument_spec = dict(
+ argument_spec=dict(
server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
user=dict(default=None, type='str'),
password=dict(default=None, type='str'),
zbx_class=dict(choices=ZabbixAPI.classes.keys()),
- action=dict(default=None, type='str'),
params=dict(),
debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
@@ -227,47 +288,83 @@ def main():
if not user:
user = os.environ['ZABBIX_USER']
- pw = module.params.get('password', None)
- if not pw:
- pw = os.environ['ZABBIX_PASSWORD']
+ passwd = module.params.get('password', None)
+ if not passwd:
+ passwd = os.environ['ZABBIX_PASSWORD']
- server = module.params['server']
- if module.params['debug']:
- options['debug'] = True
api_data = {
'user': user,
- 'password': pw,
- 'server': server,
+ 'password': passwd,
+ 'server': module.params['server'],
+ 'verbose': module.params['debug']
}
- if not user or not pw or not server:
- module.fail_json('Please specify the user, password, and the zabbix server.')
+ if not user or not passwd or not module.params['server']:
+ module.fail_json(msg='Please specify the user, password, and the zabbix server.')
zapi = ZabbixAPI(api_data)
zbx_class = module.params.get('zbx_class')
- action = module.params.get('action')
- params = module.params.get('params', {})
-
+ rpc_params = module.params.get('params', {})
+ state = module.params.get('state')
# Get the instance we are trying to call
zbx_class_inst = zapi.__getattribute__(zbx_class.lower())
- # Get the instance's method we are trying to call
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__[action]
- # Make the call with the incoming params
- results = zbx_action_method(zbx_class_inst, **params)
-
- # Results Section
- changed_state = False
- status = results[0]['status']
- if status not in ['200', '201']:
- #changed_state = False
- module.fail_json(msg="Http response: [%s] - Error: %s" % (str(results[0]), results[1]))
- module.exit_json(**{'results': results[1]['result']})
+ # perform get
+ # Get the instance's method we are trying to call
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['get']
+ _, content = zbx_action_method(zbx_class_inst, rpc_params)
+
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+ # If we are coming from a query, we need to pass in the correct rpc_params for delete.
+ # specifically the zabbix class name + 'id'
+ # if rpc_params is a list then we need to pass it. (list of ids to delete)
+ idname = zbx_class.lower() + "id"
+ if not isinstance(rpc_params, list) and content['result'][0].has_key(idname):
+ rpc_params = [content['result'][0][idname]]
+
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['delete']
+ _, content = zbx_action_method(zbx_class_inst, rpc_params)
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ # It's not there, create it!
+ if not exists(content):
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['create']
+ _, content = zbx_action_method(zbx_class_inst, rpc_params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+ # It's there and the same, do nothing!
+ diff_params = diff_content(content['result'][0], rpc_params)
+ if not diff_params:
+ module.exit_json(changed=False, results=content['result'], state="present")
+
+ # Add the id to update with
+ idname = zbx_class.lower() + "id"
+ diff_params[idname] = content['result'][0][idname]
+
+
+ ## It's there and not the same, update it!
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['update']
+ _, content = zbx_action_method(zbx_class_inst, diff_params)
+ module.exit_json(changed=True, results=content, state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. This are required
from ansible.module_utils.basic import *
main()
diff --git a/roles/pods/meta/main.yml b/roles/pods/meta/main.yml
index c5c362c60..bddf14bb2 100644
--- a/roles/pods/meta/main.yml
+++ b/roles/pods/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: your name
- description:
+ description:
company: your company (optional)
# Some suggested licenses:
# - BSD (default)
@@ -14,7 +14,7 @@ galaxy_info:
min_ansible_version: 1.2
#
# Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
+ # the ones that apply to your role. If you don't see your
# platform on this list, let us know and we'll get it added!
#
#platforms:
@@ -121,4 +121,4 @@ dependencies: []
# dependencies available via galaxy should be listed here.
# Be sure to remove the '[]' above if you add dependencies
# to this list.
-
+