Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/tasks/main.yml | 2
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 1377
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 1444
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 1614
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 1232
-rw-r--r--  roles/lib_openshift/src/ansible/oc_edit.py | 48
-rw-r--r--  roles/lib_openshift/src/ansible/oc_obj.py | 37
-rw-r--r--  roles/lib_openshift/src/ansible/oc_route.py | 84
-rw-r--r--  roles/lib_openshift/src/ansible/oc_version.py | 26
-rw-r--r--  roles/lib_openshift/src/class/oc_edit.py | 94
-rw-r--r--  roles/lib_openshift/src/class/oc_obj.py | 193
-rw-r--r--  roles/lib_openshift/src/class/oc_route.py | 170
-rw-r--r--  roles/lib_openshift/src/class/oc_version.py | 47
-rw-r--r--  roles/lib_openshift/src/doc/edit | 116
-rw-r--r--  roles/lib_openshift/src/doc/generated | 10
-rw-r--r--  roles/lib_openshift/src/doc/license | 16
-rw-r--r--  roles/lib_openshift/src/doc/obj | 95
-rw-r--r--  roles/lib_openshift/src/doc/route | 120
-rw-r--r--  roles/lib_openshift/src/doc/version | 40
-rwxr-xr-x  roles/lib_openshift/src/generate.py | 75
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 522
-rw-r--r--  roles/lib_openshift/src/lib/import.py | 17
-rw-r--r--  roles/lib_openshift/src/lib/route.py | 123
-rw-r--r--  roles/lib_openshift/src/sources.yml | 38
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_route.yml | 77
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_version.yml | 17
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_version.py | 70
-rw-r--r--  roles/lib_utils/library/yedit.py | 114
-rw-r--r--  roles/lib_utils/src/ansible/yedit.py | 44
-rw-r--r--  roles/lib_utils/src/class/yedit.py | 64
-rw-r--r--  roles/lib_utils/src/doc/generated | 9
-rw-r--r--  roles/lib_utils/src/doc/yedit | 6
-rwxr-xr-x  roles/lib_utils/src/generate.py | 82
-rw-r--r--  roles/lib_utils/src/sources.yml (renamed from roles/lib_utils/src/generate_sources.yml) | 1
-rw-r--r--  roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig | 52
-rw-r--r--  roles/openshift_builddefaults/vars/main.yml | 1
-rw-r--r--  roles/openshift_buildoverrides/tasks/main.yml | 9
-rw-r--r--  roles/openshift_buildoverrides/vars/main.yml | 3
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 312
-rw-r--r--  roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png | bin 0 -> 189466 bytes
-rw-r--r--  roles/openshift_certificate_expiry/examples/cert-expiry-report.html | 396
-rw-r--r--  roles/openshift_certificate_expiry/examples/cert-expiry-report.json | 178
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/default.yaml | 10
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml | 21
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml | 12
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml | 13
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml | 12
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 35
-rw-r--r--  roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 | 29
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json | 14
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 29
-rw-r--r--  roles/openshift_logging/README.md | 3
-rw-r--r--  roles/openshift_logging/files/generate-jks.sh | 12
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 4
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 126
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/generate_jks.yaml | 98
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 21
-rw-r--r--  roles/openshift_logging/tasks/label_node.yaml | 27
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 65
-rw-r--r--  roles/openshift_metrics/README.md | 6
-rwxr-xr-x  roles/openshift_metrics/files/import_jks_certs.sh | 2
-rw-r--r--  roles/openshift_metrics/tasks/import_jks_certs.yaml | 146
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 16
-rw-r--r--  roles/openshift_metrics/tasks/install_support.yaml | 18
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 7
-rw-r--r--  roles/openshift_metrics/templates/jks_pod.j2 | 38
-rw-r--r--  roles/openshift_node/meta/main.yml | 4
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 2
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 26
-rwxr-xr-x  roles/os_firewall/library/os_firewall_manage_iptables.py | 4
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 5
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 5
77 files changed, 9205 insertions, 601 deletions
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index a8935370a..66c9cfa0f 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -96,7 +96,7 @@
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
line: "OPTIONS='\
- {% if ansible_selinux and ansible_selinux.status == '''enabled''' %} --selinux-enabled{% endif %}\
+ {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\
{% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
{% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
{% if docker_options is defined %} {{ docker_options }}{% endif %}\
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
new file mode 100644
index 000000000..1a361ae20
--- /dev/null
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -0,0 +1,1377 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_edit
+short_description: Modify and idempotently manage openshift objects.
+description:
+ - Modify openshift objects programmatically.
+options:
+ state:
+ description:
+    - Currently, present is the only supported state.
+ required: true
+ default: present
+ choices: ["present"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+    default: default
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object.
+ required: True
+ default: None
+ choices:
+ - bc
+ - buildconfig
+ - configmaps
+ - dc
+ - deploymentconfig
+ - imagestream
+ - imagestreamtag
+ - is
+ - istag
+ - namespace
+ - project
+ - projects
+ - node
+ - ns
+ - persistentvolume
+ - pv
+ - rc
+ - replicationcontroller
+ - routes
+ - scc
+ - secret
+ - securitycontextconstraints
+ - service
+ - svc
+ aliases: []
+ file_name:
+ description:
+    - The name of the file to edit.
+ required: false
+ default: None
+ aliases: []
+ file_format:
+ description:
+ - The format of the file being edited.
+ required: false
+ default: yaml
+ aliases: []
+ content:
+ description:
+ - Content of the file
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ separator:
+ description:
+ - The separator format for the edit.
+ required: false
+ default: '.'
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_edit:
+ kind: rc
+ name: hawkular-cassandra-rc
+ namespace: openshift-infra
+ content:
+ spec.template.spec.containers[0].resources.limits.memory: 512
+ spec.template.spec.containers[0].resources.requests.memory: 256
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
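+
+    # Illustrative example (hypothetical input): the key grammar above splits a
+    # dotted key with optional list indexes into (array_index, dict_key) tuples:
+    #   Yedit.parse_key('a.b[0].c') -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]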
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Set an item in a dictionary using key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a.b, item = 'd'
+            result: d = {'a': {'b': 'd'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
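+
+    # Illustrative example (hypothetical input): get_entry() walks nested
+    # dicts/lists using the key notation described above:
+    #   Yedit.get_entry({'a': {'b': ['c']}}, 'a.b[0]') -> 'c'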
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
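+
+    # Illustrative example (hypothetical input): put() creates intermediate
+    # dictionaries as needed before setting the final key:
+    #   Yedit(content={}).put('a.b.c', 1) -> (True, {'a': {'b': {'c': 1}}})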
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
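+
+    # Illustrative examples (hypothetical input): values are YAML-loaded unless
+    # vtype contains 'str':
+    #   Yedit.parse_value('42') -> 42
+    #   Yedit.parse_value('42', vtype='str') -> '42'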
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+                        'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+    return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+        # By default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
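+
+    # Illustrative example (hypothetical input):
+    #   Utils.add_custom_versions({'oc': 'v3.3.0.33'})
+    #   -> {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}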
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
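+
+    # Illustrative example (hypothetical input): 'metadata' and 'status' are
+    # skipped by default, so the extra 'status' key does not break equality:
+    #   Utils.check_def_equal({'spec': {'replicas': 1}},
+    #                         {'spec': {'replicas': 1}, 'status': {}}) -> True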
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
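+
+    # Illustrative example (hypothetical values): included options are rendered
+    # as CLI flags with underscores converted to dashes:
+    #   OpenShiftCLIConfig('router', 'default', '/tmp/kc',
+    #                      {'service_account': {'value': 'router', 'include': True}}
+    #                     ).stringify() -> ['--service-account=router']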
+
+
+class Edit(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ resource_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ separator='.',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(Edit, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.kind = kind
+ self.name = resource_name
+ self.kubeconfig = kubeconfig
+ self.separator = separator
+ self.verbose = verbose
+
+ def get(self):
+ '''return a secret by name '''
+ return self._get(self.kind, self.name)
+
+ def update(self, file_name, content, force=False, content_type='yaml'):
+ '''run update '''
+ if file_name:
+ if content_type == 'yaml':
+ data = yaml.load(open(file_name))
+ elif content_type == 'json':
+ data = json.loads(open(file_name).read())
+
+ changes = []
+ yed = Yedit(filename=file_name, content=data, separator=self.separator)
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([not change[0] for change in changes]):
+ return {'returncode': 0, 'updated': False}
+
+ yed.write()
+
+ atexit.register(Utils.cleanup, [file_name])
+
+ return self._replace(file_name, force=force)
+
+ return self._replace_content(self.kind, self.name, content, force=force, sep=self.separator)
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ ocedit = Edit(params['kind'],
+ params['namespace'],
+ params['name'],
+ kubeconfig=params['kubeconfig'],
+ separator=params['separator'],
+ verbose=params['debug'])
+
+ api_rval = ocedit.get()
+
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {"failed": True, 'msg': api_rval}
+
+ ########
+ # Update
+ ########
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
+
+ api_rval = ocedit.update(params['file_name'],
+ params['content'],
+ params['force'],
+ params['file_format'])
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ if 'updated' in api_rval and not api_rval['updated']:
+ return {"changed": False, 'results': api_rval, 'state': 'present'}
+
+ # return the created object
+ api_rval = ocedit.get()
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ return {"changed": True, 'results': api_rval, 'state': 'present'}
+
+
+def main():
+ '''
+ ansible oc module for editing objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+ kind=dict(required=True,
+ type='str',
+ choices=['dc', 'deploymentconfig',
+ 'rc', 'replicationcontroller',
+ 'svc', 'service',
+ 'scc', 'securitycontextconstraints',
+ 'ns', 'namespace', 'project', 'projects',
+ 'is', 'imagestream',
+ 'istag', 'imagestreamtag',
+ 'bc', 'buildconfig',
+ 'routes',
+ 'node',
+ 'secret',
+ 'pv', 'persistentvolume']),
+ file_name=dict(default=None, type='str'),
+ file_format=dict(default='yaml', type='str'),
+ content=dict(default=None, required=True, type='dict'),
+ force=dict(default=False, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = Edit.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
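
A minimal sketch of exercising the Edit wrapper above outside of Ansible (illustrative only:
the parameter values are hypothetical, and it assumes the oc binary plus the referenced
kubeconfig exist on the host; in normal use AnsibleModule supplies these params):

    params = {
        'kind': 'dc',
        'namespace': 'default',
        'name': 'router',
        'kubeconfig': '/etc/origin/master/admin.kubeconfig',
        'separator': '.',
        'debug': False,
        'file_name': None,
        'file_format': 'yaml',
        'content': {'spec.replicas': 2},
        'force': False,
    }
    # run_ansible() returns a dict with 'changed'/'results'/'state', or 'failed' on error.
    rval = Edit.run_ansible(params, check_mode=False)
    print(rval.get('changed'), rval.get('state'))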
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
new file mode 100644
index 000000000..5b501484b
--- /dev/null
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -0,0 +1,1444 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_obj
+short_description: Generic interface to openshift objects
+description:
+ - Manage openshift objects programmatically.
+options:
+ state:
+ description:
+    - Supported states are present, absent, and list.
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+    default: default
+ aliases: []
+ all_namespace:
+ description:
+    - Whether to query all namespaces rather than a single namespace.
+ required: false
+ default: false
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object. e.g. dc, bc, svc, route
+ required: True
+ default: None
+ aliases: []
+ files:
+ description:
+ - A list of files provided for object
+ required: false
+ default: None
+ aliases: []
+ delete_after:
+ description:
+ - Whether or not to delete the files after processing them.
+ required: false
+ default: false
+ aliases: []
+ content:
+ description:
+ - Content of the object being managed.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ selector:
+ description:
+ - Selector that gets added to the query.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_obj:
+ kind: dc
+ name: router
+ namespace: default
+register: router_output
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Set an item in a dictionary using key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a.b, item = 'd'
+            result: d = {'a': {'b': 'd'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+                    'msg': ('Error opening file [%s]. Verify that the '
+                            'file exists, that it has correct '
+                            'permissions, and is valid yaml.'
+                            % module.params['src'])}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
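+        # Illustrative example (hypothetical call): openshift_cmd(['get', 'routes', '-o', 'json'])
+        # runs "/usr/bin/oc -n <namespace> get routes -o json", or "/usr/bin/oadm ..." when oadm=True.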
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
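+        # Illustrative sketch of the expected input/output (hypothetical values):
+        #   "oc v3.3.0.34\nkubernetes v1.3.0+52492b4" becomes
+        #   {'oc': 'v3.3.0.34', 'kubernetes': 'v1.3.0+52492b4'}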
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+        # Horrible hack to get the openshift version in OpenShift 3.2:
+        # by default "oc version" in 3.2 does not return an "openshift" version.
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
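+        # Illustrative sketch (hypothetical values): {'oc': 'v3.3.0.34'} yields
+        # {'oc_numeric': '3.3.0.34', 'oc_short': '3.3'}, assuming a 'vX.Y' prefix.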
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
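+        # Illustrative sketch (hypothetical option): an entry such as
+        # {'service_name': {'value': 'myapp', 'include': True}} is rendered
+        # as '--service-name=myapp'; underscores become dashes.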
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# pylint: disable=too-many-instance-attributes
+class OCObject(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ # pylint allows 5. we need 6
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ rname=None,
+ selector=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+        ''' Constructor for OCObject '''
+ super(OCObject, self).__init__(namespace, kubeconfig,
+ all_namespaces=all_namespaces)
+ self.kind = kind
+ self.namespace = namespace
+ self.name = rname
+ self.selector = selector
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+
+ def get(self):
+ '''return a kind by name '''
+ results = self._get(self.kind, rname=self.name, selector=self.selector)
+ if results['returncode'] != 0 and 'stderr' in results and \
+ '\"%s\" not found' % self.name in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def delete(self):
+        '''delete the object by kind and name'''
+ return self._delete(self.kind, self.name)
+
+ def create(self, files=None, content=None):
+ '''
+ Create a config
+
+        NOTE: This creates the first file OR the first content.
+ TODO: Handle all files and content passed in
+ '''
+ if files:
+ return self._create(files[0])
+
+ content['data'] = yaml.dump(content['data'])
+ content_file = Utils.create_files_from_contents(content)[0]
+
+ return self._create(content_file['path'])
+
+ # pylint: disable=too-many-function-args
+ def update(self, files=None, content=None, force=False):
+ '''update a current openshift object
+
+ This receives a list of file names or content
+ and takes the first and calls replace.
+
+ TODO: take an entire list
+ '''
+ if files:
+ return self._replace(files[0], force)
+
+ if content and 'data' in content:
+ content = content['data']
+
+ return self.update_content(content, force)
+
+ def update_content(self, content, force=False):
+ '''update an object through using the content param'''
+ return self._replace_content(self.kind, self.name, content, force=force)
+
+ def needs_update(self, files=None, content=None, content_type='yaml'):
+ ''' check to see if we need to update '''
+ objects = self.get()
+ if objects['returncode'] != 0:
+ return objects
+
+ # pylint: disable=no-member
+ data = None
+ if files:
+ data = Utils.get_resource_file(files[0], content_type)
+ elif content and 'data' in content:
+ data = content['data']
+ else:
+ data = content
+
+        # If the definitions are equal no update is needed, so return the negation.
+ return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''perform the ansible idempotent code'''
+
+ ocobj = OCObject(params['kind'],
+ params['namespace'],
+ params['name'],
+ params['selector'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'],
+ all_namespaces=params['all_namespaces'])
+
+ state = params['state']
+
+ api_rval = ocobj.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': 'list'}
+
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
+
+ api_rval = ocobj.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create'}
+
+ # Create it here
+ api_rval = ocobj.create(params['files'], params['content'])
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # Remove files
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ ########
+ # Update
+ ########
+ # if a file path is passed, use it.
+ update = ocobj.needs_update(params['files'], params['content'])
+ if not isinstance(update, bool):
+ return {'failed': True, 'msg': update}
+
+ # No changes
+ if not update:
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocobj.update(params['files'],
+ params['content'],
+ params['force'])
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+    ansible oc module for managing openshift objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+            all_namespaces=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ files=dict(default=None, type='list'),
+ kind=dict(required=True, type='str'),
+ delete_after=dict(default=False, type='bool'),
+ content=dict(default=None, type='dict'),
+ force=dict(default=False, type='bool'),
+ selector=dict(default=None, type='str'),
+ ),
+ mutually_exclusive=[["content", "files"]],
+
+ supports_check_mode=True,
+ )
+ rval = OCObject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
new file mode 100644
index 000000000..19c7462ea
--- /dev/null
+++ b/roles/lib_openshift/library/oc_route.py
@@ -0,0 +1,1614 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_route
+short_description: Create, modify, and idempotently manage openshift routes.
+description:
+ - Manage openshift route objects programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ tls_termination:
+ description:
+ - The options for termination. e.g. reencrypt
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_path:
+ description:
+ - The path to the dest_cacert
+ required: false
+ default: None
+ aliases: []
+ cacert_path:
+ description:
+ - The path to the cacert
+ required: false
+ default: None
+ aliases: []
+ cert_path:
+ description:
+ - The path to the cert
+ required: false
+ default: None
+ aliases: []
+ key_path:
+ description:
+ - The path to the key
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_content:
+ description:
+ - The dest_cacert content
+ required: false
+ default: None
+ aliases: []
+ cacert_content:
+ description:
+ - The cacert content
+ required: false
+ default: None
+ aliases: []
+ cert_content:
+ description:
+ - The cert content
+ required: false
+ default: None
+ aliases: []
+ service_name:
+ description:
+ - The name of the service that this route points to.
+ required: false
+ default: None
+ aliases: []
+ host:
+ description:
+ - The host that the route will use. e.g. myapp.x.y.z
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Configure certificates for reencrypt route
+ oc_route:
+ name: myapproute
+ namespace: awesomeapp
+    cert_path: "/etc/origin/master/named_certificates/myapp_cert"
+    key_path: "/etc/origin/master/named_certificates/myapp_key"
+    cacert_path: "/etc/origin/master/named_certificates/myapp_cacert"
+ dest_cacert_content: "{{ dest_cacert_content }}"
+ service_name: myapp_php
+ host: myapp.awesomeapp.openshift.com
+ tls_termination: reencrypt
+ run_once: true
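+
+# A second, minimal sketch (hypothetical names): a route can also be removed
+# idempotently by setting state to absent.
+- name: Delete the route for a retired application
+  oc_route:
+    name: myapproute
+    namespace: awesomeapp
+    state: absent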
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
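+        # Illustrative example: with the default '.' separator,
+        # 'spec.tls[0].certificate' is split into (array_index, dict_key)
+        # pairs: [('', 'spec'), ('', 'tls'), ('0', ''), ('', 'certificate')]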
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add or set an item in a dictionary using key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a#b, item = z
+            result: d = {'a': {'b': 'z'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+        ''' remove a key, value pair from a dict or an item from a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+        ''' update the entry at path with value; handles both dict and list entries '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
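+        # Illustrative examples: parse_value('3') -> 3 and
+        # parse_value('a: b') -> {'a': 'b'} via yaml.load, while
+        # parse_value('3', vtype='str') returns the string unchanged.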
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+        # The value came in as a string with a 'bool' value_type;
+        # validate that it matches one of the accepted boolean spellings.
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+                    'msg': ('Error opening file [%s]. Verify that the '
+                            'file exists, that it has correct '
+                            'permissions, and is valid yaml.'
+                            % module.params['src'])}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+        # Horrible hack to get the openshift version in OpenShift 3.2:
+        # by default "oc version" in 3.2 does not return an "openshift" version.
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
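+        # Illustrative sketch: a user definition such as {'spec': {...}} is
+        # compared key by key against the API result; autogenerated keys like
+        # 'metadata' and 'status' are ignored during the comparison.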
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+# noqa: E302,E301
+
+
+# pylint: disable=too-many-instance-attributes
+class RouteConfig(object):
+ ''' Handle route options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ destcacert=None,
+ cacert=None,
+ cert=None,
+ key=None,
+ host=None,
+ tls_termination=None,
+ service_name=None,
+ wildcard_policy=None,
+ weight=None):
+ ''' constructor for handling route options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.host = host
+ self.tls_termination = tls_termination
+ self.destcacert = destcacert
+ self.cacert = cacert
+ self.cert = cert
+ self.key = key
+ self.service_name = service_name
+ self.data = {}
+ self.wildcard_policy = wildcard_policy
+ if wildcard_policy is None:
+ self.wildcard_policy = 'None'
+ self.weight = weight
+ if weight is None:
+ self.weight = 100
+
+ self.create_dict()
+
+ def create_dict(self):
+        ''' populate the route definition as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Route'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+ self.data['spec'] = {}
+
+ self.data['spec']['host'] = self.host
+
+ if self.tls_termination:
+ self.data['spec']['tls'] = {}
+
+ if self.tls_termination == 'reencrypt':
+ self.data['spec']['tls']['destinationCACertificate'] = self.destcacert
+ self.data['spec']['tls']['key'] = self.key
+ self.data['spec']['tls']['caCertificate'] = self.cacert
+ self.data['spec']['tls']['certificate'] = self.cert
+ self.data['spec']['tls']['termination'] = self.tls_termination
+
+ self.data['spec']['to'] = {'kind': 'Service',
+ 'name': self.service_name,
+ 'weight': self.weight}
+
+ self.data['spec']['wildcardPolicy'] = self.wildcard_policy
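+    # Illustrative example (hypothetical names): RouteConfig('myroute', 'default',
+    # kubeconfig, service_name='mysvc', tls_termination='edge') would produce
+    # self.data roughly like:
+    #   {'apiVersion': 'v1', 'kind': 'Route',
+    #    'metadata': {'name': 'myroute', 'namespace': 'default'},
+    #    'spec': {'host': None,
+    #             'tls': {'termination': 'edge'},
+    #             'to': {'kind': 'Service', 'name': 'mysvc', 'weight': 100},
+    #             'wildcardPolicy': 'None'}}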
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class Route(Yedit):
+    ''' Yedit-based wrapper around a route definition '''
+ wildcard_policy = "spec.wildcardPolicy"
+ host_path = "spec.host"
+ service_path = "spec.to.name"
+ weight_path = "spec.to.weight"
+ cert_path = "spec.tls.certificate"
+ cacert_path = "spec.tls.caCertificate"
+ destcacert_path = "spec.tls.destinationCACertificate"
+ termination_path = "spec.tls.termination"
+ key_path = "spec.tls.key"
+ kind = 'route'
+
+ def __init__(self, content):
+ '''Route constructor'''
+ super(Route, self).__init__(content=content)
+
+ def get_destcacert(self):
+        ''' return the destination CA certificate '''
+ return self.get(Route.destcacert_path)
+
+ def get_cert(self):
+ ''' return cert '''
+ return self.get(Route.cert_path)
+
+ def get_key(self):
+ ''' return key '''
+ return self.get(Route.key_path)
+
+ def get_cacert(self):
+ ''' return cacert '''
+ return self.get(Route.cacert_path)
+
+ def get_service(self):
+ ''' return service name '''
+ return self.get(Route.service_path)
+
+ def get_weight(self):
+ ''' return service weight '''
+ return self.get(Route.weight_path)
+
+ def get_termination(self):
+ ''' return tls termination'''
+ return self.get(Route.termination_path)
+
+ def get_host(self):
+ ''' return host '''
+ return self.get(Route.host_path)
+
+ def get_wildcard_policy(self):
+ ''' return wildcardPolicy '''
+ return self.get(Route.wildcard_policy)
+
+
+# pylint: disable=too-many-instance-attributes
+class OCRoute(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'route'
+
+ def __init__(self,
+ config,
+ verbose=False):
+        ''' Constructor for OCRoute '''
+ super(OCRoute, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._route = None
+
+ @property
+ def route(self):
+ ''' property function for route'''
+ if not self._route:
+ self.get()
+ return self._route
+
+ @route.setter
+ def route(self, data):
+ ''' setter function for route '''
+ self._route = data
+
+ def exists(self):
+ ''' return whether a route exists '''
+ if self.route:
+ return True
+
+ return False
+
+ def get(self):
+ '''return route information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.route = Route(content=result['results'][0])
+ elif 'routes \"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # need to update the tls information and the service name
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ skip = []
+ return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, files, check_mode=False):
+        ''' run the idempotent ansible code
+
+ params comes from the ansible portion for this module
+ files: a dictionary for the certificates
+ {'cert': {'path': '',
+ 'content': '',
+ 'value': ''
+ }
+ }
+        check_mode: whether ansible is running in check mode (module.check_mode)
+ '''
+
+ rconfig = RouteConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ files['destcacert']['value'],
+ files['cacert']['value'],
+ files['cert']['value'],
+ files['key']['value'],
+ params['host'],
+ params['tls_termination'],
+ params['service_name'],
+ params['wildcard_policy'],
+ params['weight'])
+
+ oc_route = OCRoute(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_route.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False,
+ 'results': api_rval['results'],
+ 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_route.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'} # noqa: E501
+
+ api_rval = oc_route.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': "absent"} # noqa: E501
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_route.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} # noqa: E501
+
+ # Create it here
+ api_rval = oc_route.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ ########
+ # Update
+ ########
+ if oc_route.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} # noqa: E501
+
+ api_rval = oc_route.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': False, 'results': api_rval, 'state': "present"}
+
+ # catch all
+ return {'failed': True, 'msg': "Unknown State passed"}
+
+
+def get_cert_data(path, content):
+ '''get the data for a particular value'''
+ if not path and not content:
+ return None
+
+ rval = None
+ if path and os.path.exists(path) and os.access(path, os.R_OK):
+ rval = open(path).read()
+ elif content:
+ rval = content
+
+ return rval
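+# Illustrative usage (hypothetical values): get_cert_data('/path/to/route.crt', None)
+# returns the file contents when the path exists and is readable, while
+# get_cert_data(None, '<inline PEM data>') simply returns the inline content.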
+
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for route
+ '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ tls_termination=dict(default=None, type='str'),
+ dest_cacert_path=dict(default=None, type='str'),
+ cacert_path=dict(default=None, type='str'),
+ cert_path=dict(default=None, type='str'),
+ key_path=dict(default=None, type='str'),
+ dest_cacert_content=dict(default=None, type='str'),
+ cacert_content=dict(default=None, type='str'),
+ cert_content=dict(default=None, type='str'),
+ key_content=dict(default=None, type='str'),
+ service_name=dict(default=None, type='str'),
+ host=dict(default=None, type='str'),
+ wildcard_policy=dict(default=None, type='str'),
+ weight=dict(default=None, type='int'),
+ ),
+ mutually_exclusive=[('dest_cacert_path', 'dest_cacert_content'),
+ ('cacert_path', 'cacert_content'),
+ ('cert_path', 'cert_content'),
+ ('key_path', 'key_content'), ],
+ supports_check_mode=True,
+ )
+ files = {'destcacert': {'path': module.params['dest_cacert_path'],
+ 'content': module.params['dest_cacert_content'],
+ 'value': None, },
+ 'cacert': {'path': module.params['cacert_path'],
+ 'content': module.params['cacert_content'],
+ 'value': None, },
+ 'cert': {'path': module.params['cert_path'],
+ 'content': module.params['cert_content'],
+ 'value': None, },
+ 'key': {'path': module.params['key_path'],
+ 'content': module.params['key_content'],
+ 'value': None, }, }
+
+ if module.params['tls_termination']:
+ for key, option in files.items():
+ if key == 'destcacert' and module.params['tls_termination'] != 'reencrypt':
+ continue
+
+ option['value'] = get_cert_data(option['path'], option['content'])
+
+ if not option['value']:
+ module.fail_json(msg='Verify that you pass a value for %s' % key)
+
+ results = OCRoute.run_ansible(module.params, files, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
new file mode 100644
index 000000000..197a0a947
--- /dev/null
+++ b/roles/lib_openshift/library/oc_version.py
@@ -0,0 +1,1232 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_version
+short_description: Return the current openshift version
+description:
+  - Return the installed OpenShift version, as reported by `oc version`.
+options:
+ state:
+ description:
+    - Currently 'list' is the only supported state.
+ required: true
+ default: list
+ choices: ["list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Get the currently installed OpenShift version
+- name: get oc version
+ oc_version:
+ register: oc_version
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
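+    # Illustrative example (default '.' separator): parse_key('spec.to[0].name')
+    # returns roughly [('', 'spec'), ('', 'to'), ('0', ''), ('', 'name')],
+    # i.e. a list of (list_index, dict_key) tuples consumed by add/get/remove_entry.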
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add (or set) an item in a dictionary using key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a.b
+            item = 'd'
+            result: d == {'a': {'b': 'd'}}
+        '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
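+    # Illustrative example: get_entry({'a': {'b': ['c', 'd']}}, 'a.b[1]')
+    # returns 'd'; a missing key or an out-of-range index returns None.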
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+        # The value came in as a string but a boolean was requested;
+        # validate it against the accepted boolean spellings above.
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
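+    # Illustrative examples of the intended behaviour:
+    #   parse_value('True', 'bool')   -> True   (yaml.load converts it)
+    #   parse_value('3', 'str')       -> '3'    (left as a string)
+    #   parse_value('maybe', 'bool')  -> raises YeditException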
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+            return {'failed': True,
+                    'msg': ('Error opening file [%s]. Verify that the '
+                            'file exists, that it has correct '
+                            'permissions, and is valid yaml.') % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned as a list
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node --schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
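+    # Illustrative sketch: self.openshift_cmd(['get', 'routes', '-o', 'json'], output=True)
+    # runs roughly `/usr/bin/oc -n <namespace> get routes -o json` with KUBECONFIG set
+    # to self.kubeconfig and returns a dict containing 'returncode', 'cmd', 'results'
+    # (parsed JSON on success) and, on failure, 'stdout'/'stderr'.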
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+        '''Turn a list of dicts with 'path' and 'data' keys into a list of created files'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+        ''' return the parsed contents of a resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+        # Hack to get the openshift version in OpenShift 3.2:
+        # by default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
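+    # Illustrative example: given `oc version` output such as
+    #   oc v3.3.0.35
+    #   kubernetes v1.3.0+52492b4
+    # filter_versions() would return roughly
+    #   {'oc': 'v3.3.0.35', 'kubernetes': 'v1.3.0+52492b4', 'openshift': 'v3.3.0.35'}
+    # (the 'openshift' entry falls back to the 'oc' value when it is absent).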
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
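+    # Illustrative example: add_custom_versions({'oc': 'v3.3.0.35'}) would return
+    # {'oc_numeric': '3.3.0.35', 'oc_short': '3.3'}.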
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
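+    # Illustrative example: check_def_equal({'spec': {'host': 'abc'}},
+    # {'spec': {'host': 'abc'}, 'metadata': {}, 'status': {}}) returns True,
+    # since 'metadata' and 'status' are skipped by default.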
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+        '''return all options as a list of cli arguments'''
+ return self.stringify()
+
+ def stringify(self):
+        ''' return the options hash as a list of cli params '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+
+# pylint: disable=too-many-instance-attributes
+class OCVersion(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ debug):
+ ''' Constructor for OCVersion '''
+ super(OCVersion, self).__init__(None, config)
+ self.debug = debug
+
+ def get(self):
+ '''get and return version information '''
+
+ results = {}
+
+ version_results = self._version()
+
+ if version_results['returncode'] == 0:
+ filtered_vers = Utils.filter_versions(version_results['results'])
+ custom_vers = Utils.add_custom_versions(filtered_vers)
+
+ results['returncode'] = version_results['returncode']
+ results.update(filtered_vers)
+ results.update(custom_vers)
+
+ return results
+
+ raise OpenShiftCLIError('Problem detecting openshift version.')
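+    # Illustrative example of a successful result (values are hypothetical):
+    #   {'returncode': 0,
+    #    'oc': 'v3.3.0.35', 'openshift': 'v3.3.0.35', 'kubernetes': 'v1.3.0+52492b4',
+    #    'oc_numeric': '3.3.0.35', 'oc_short': '3.3',
+    #    'openshift_numeric': '3.3.0.35', 'openshift_short': '3.3',
+    #    'kubernetes_numeric': '1.3.0', 'kubernetes_short': '1.3'}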
+
+ @staticmethod
+ def run_ansible(params):
+ '''run the idempotent ansible code'''
+ oc_version = OCVersion(params['kubeconfig'], params['debug'])
+
+ if params['state'] == 'list':
+
+ #pylint: disable=protected-access
+ result = oc_version.get()
+ return {'state': params['state'],
+ 'results': result,
+ 'changed': False}
+
+def main():
+ ''' ansible oc module for version '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='list', type='str',
+ choices=['list']),
+ debug=dict(default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCVersion.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_edit.py b/roles/lib_openshift/src/ansible/oc_edit.py
new file mode 100644
index 000000000..5c5954747
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_edit.py
@@ -0,0 +1,48 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oc module for editing objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+ kind=dict(required=True,
+ type='str',
+ choices=['dc', 'deploymentconfig',
+ 'rc', 'replicationcontroller',
+ 'svc', 'service',
+ 'scc', 'securitycontextconstraints',
+ 'ns', 'namespace', 'project', 'projects',
+ 'is', 'imagestream',
+ 'istag', 'imagestreamtag',
+ 'bc', 'buildconfig',
+ 'routes',
+ 'node',
+ 'secret',
+ 'pv', 'persistentvolume']),
+ file_name=dict(default=None, type='str'),
+ file_format=dict(default='yaml', type='str'),
+ content=dict(default=None, required=True, type='dict'),
+ force=dict(default=False, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = Edit.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_obj.py b/roles/lib_openshift/src/ansible/oc_obj.py
new file mode 100644
index 000000000..701740e4f
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_obj.py
@@ -0,0 +1,37 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for services
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+            all_namespaces=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ files=dict(default=None, type='list'),
+ kind=dict(required=True, type='str'),
+ delete_after=dict(default=False, type='bool'),
+ content=dict(default=None, type='dict'),
+ force=dict(default=False, type='bool'),
+ selector=dict(default=None, type='str'),
+ ),
+ mutually_exclusive=[["content", "files"]],
+
+ supports_check_mode=True,
+ )
+ rval = OCObject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_route.py b/roles/lib_openshift/src/ansible/oc_route.py
new file mode 100644
index 000000000..c87e6738f
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_route.py
@@ -0,0 +1,84 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def get_cert_data(path, content):
+ '''get the data for a particular value'''
+ if not path and not content:
+ return None
+
+ rval = None
+ if path and os.path.exists(path) and os.access(path, os.R_OK):
+ rval = open(path).read()
+ elif content:
+ rval = content
+
+ return rval
+
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for route
+ '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ tls_termination=dict(default=None, type='str'),
+ dest_cacert_path=dict(default=None, type='str'),
+ cacert_path=dict(default=None, type='str'),
+ cert_path=dict(default=None, type='str'),
+ key_path=dict(default=None, type='str'),
+ dest_cacert_content=dict(default=None, type='str'),
+ cacert_content=dict(default=None, type='str'),
+ cert_content=dict(default=None, type='str'),
+ key_content=dict(default=None, type='str'),
+ service_name=dict(default=None, type='str'),
+ host=dict(default=None, type='str'),
+ wildcard_policy=dict(default=None, type='str'),
+ weight=dict(default=None, type='int'),
+ ),
+ mutually_exclusive=[('dest_cacert_path', 'dest_cacert_content'),
+ ('cacert_path', 'cacert_content'),
+ ('cert_path', 'cert_content'),
+ ('key_path', 'key_content'), ],
+ supports_check_mode=True,
+ )
+ files = {'destcacert': {'path': module.params['dest_cacert_path'],
+ 'content': module.params['dest_cacert_content'],
+ 'value': None, },
+ 'cacert': {'path': module.params['cacert_path'],
+ 'content': module.params['cacert_content'],
+ 'value': None, },
+ 'cert': {'path': module.params['cert_path'],
+ 'content': module.params['cert_content'],
+ 'value': None, },
+ 'key': {'path': module.params['key_path'],
+ 'content': module.params['key_content'],
+ 'value': None, }, }
+
+ if module.params['tls_termination']:
+ for key, option in files.items():
+ if key == 'destcacert' and module.params['tls_termination'] != 'reencrypt':
+ continue
+
+ option['value'] = get_cert_data(option['path'], option['content'])
+
+ if not option['value']:
+ module.fail_json(msg='Verify that you pass a value for %s' % key)
+
+ results = OCRoute.run_ansible(module.params, files, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_version.py b/roles/lib_openshift/src/ansible/oc_version.py
new file mode 100644
index 000000000..57ef849ca
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_version.py
@@ -0,0 +1,26 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ ''' ansible oc module for version '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='list', type='str',
+ choices=['list']),
+ debug=dict(default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCVersion.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oc_edit.py b/roles/lib_openshift/src/class/oc_edit.py
new file mode 100644
index 000000000..0734e2085
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_edit.py
@@ -0,0 +1,94 @@
+# pylint: skip-file
+# flake8: noqa
+
+class Edit(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ resource_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ separator='.',
+ verbose=False):
+        ''' Constructor for Edit '''
+ super(Edit, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.kind = kind
+ self.name = resource_name
+ self.kubeconfig = kubeconfig
+ self.separator = separator
+ self.verbose = verbose
+
+ def get(self):
+        '''return the object by name '''
+ return self._get(self.kind, self.name)
+
+ def update(self, file_name, content, force=False, content_type='yaml'):
+ '''run update '''
+ if file_name:
+ if content_type == 'yaml':
+ data = yaml.load(open(file_name))
+ elif content_type == 'json':
+ data = json.loads(open(file_name).read())
+
+ changes = []
+ yed = Yedit(filename=file_name, content=data, separator=self.separator)
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([not change[0] for change in changes]):
+ return {'returncode': 0, 'updated': False}
+
+ yed.write()
+
+ atexit.register(Utils.cleanup, [file_name])
+
+ return self._replace(file_name, force=force)
+
+ return self._replace_content(self.kind, self.name, content, force=force, sep=self.separator)
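+    # Illustrative example (hypothetical key): content={'spec.replicas': 3} is applied
+    # one key at a time via Yedit.put('spec.replicas', 3) against either the supplied
+    # file or the live object, and pushed back with `oc replace` when changes are detected.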
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ ocedit = Edit(params['kind'],
+ params['namespace'],
+ params['name'],
+ kubeconfig=params['kubeconfig'],
+ separator=params['separator'],
+ verbose=params['debug'])
+
+ api_rval = ocedit.get()
+
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {"failed": True, 'msg': api_rval}
+
+ ########
+ # Update
+ ########
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
+
+ api_rval = ocedit.update(params['file_name'],
+ params['content'],
+ params['force'],
+ params['file_format'])
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ if 'updated' in api_rval and not api_rval['updated']:
+ return {"changed": False, 'results': api_rval, 'state': 'present'}
+
+ # return the created object
+ api_rval = ocedit.get()
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ return {"changed": True, 'results': api_rval, 'state': 'present'}
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
new file mode 100644
index 000000000..9d0b8e45b
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -0,0 +1,193 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCObject(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ # pylint allows 5. we need 6
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ rname=None,
+ selector=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+        ''' Constructor for OCObject '''
+ super(OCObject, self).__init__(namespace, kubeconfig,
+ all_namespaces=all_namespaces)
+ self.kind = kind
+ self.namespace = namespace
+ self.name = rname
+ self.selector = selector
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+
+ def get(self):
+ '''return a kind by name '''
+ results = self._get(self.kind, rname=self.name, selector=self.selector)
+ if results['returncode'] != 0 and 'stderr' in results and \
+ '\"%s\" not found' % self.name in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def delete(self):
+        '''delete the object '''
+ return self._delete(self.kind, self.name)
+
+ def create(self, files=None, content=None):
+ '''
+ Create a config
+
+        NOTE: This creates the first file OR the first content.
+ TODO: Handle all files and content passed in
+ '''
+ if files:
+ return self._create(files[0])
+
+ content['data'] = yaml.dump(content['data'])
+ content_file = Utils.create_files_from_contents(content)[0]
+
+ return self._create(content_file['path'])
+
+ # pylint: disable=too-many-function-args
+ def update(self, files=None, content=None, force=False):
+ '''update a current openshift object
+
+ This receives a list of file names or content
+ and takes the first and calls replace.
+
+ TODO: take an entire list
+ '''
+ if files:
+ return self._replace(files[0], force)
+
+ if content and 'data' in content:
+ content = content['data']
+
+ return self.update_content(content, force)
+
+ def update_content(self, content, force=False):
+ '''update an object through using the content param'''
+ return self._replace_content(self.kind, self.name, content, force=force)
+
+ def needs_update(self, files=None, content=None, content_type='yaml'):
+ ''' check to see if we need to update '''
+ objects = self.get()
+ if objects['returncode'] != 0:
+ return objects
+
+ # pylint: disable=no-member
+ data = None
+ if files:
+ data = Utils.get_resource_file(files[0], content_type)
+ elif content and 'data' in content:
+ data = content['data']
+ else:
+ data = content
+
+        # if the definitions are equal no update is needed, so return True only when they differ
+ return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''perform the ansible idempotent code'''
+
+ ocobj = OCObject(params['kind'],
+ params['namespace'],
+ params['name'],
+ params['selector'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'],
+ all_namespaces=params['all_namespaces'])
+
+ state = params['state']
+
+ api_rval = ocobj.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': 'list'}
+
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
+
+ api_rval = ocobj.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create'}
+
+ # Create it here
+ api_rval = ocobj.create(params['files'], params['content'])
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # Remove files
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ ########
+ # Update
+ ########
+ # if a file path is passed, use it.
+ update = ocobj.needs_update(params['files'], params['content'])
+ if not isinstance(update, bool):
+ return {'failed': True, 'msg': update}
+
+ # No changes
+ if not update:
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocobj.update(params['files'],
+ params['content'],
+ params['force'])
+
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
new file mode 100644
index 000000000..42af2c01c
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -0,0 +1,170 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCRoute(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'route'
+
+ def __init__(self,
+ config,
+ verbose=False):
+        ''' Constructor for OCRoute '''
+ super(OCRoute, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._route = None
+
+ @property
+ def route(self):
+ ''' property function for route'''
+ if not self._route:
+ self.get()
+ return self._route
+
+ @route.setter
+ def route(self, data):
+ ''' setter function for route '''
+ self._route = data
+
+ def exists(self):
+ ''' return whether a route exists '''
+ if self.route:
+ return True
+
+ return False
+
+ def get(self):
+ '''return route information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.route = Route(content=result['results'][0])
+ elif 'routes \"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # need to update the tls information and the service name
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ skip = []
+ return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, files, check_mode=False):
+        ''' run the idempotent ansible code
+
+ params comes from the ansible portion for this module
+ files: a dictionary for the certificates
+ {'cert': {'path': '',
+ 'content': '',
+ 'value': ''
+ }
+ }
+        check_mode: whether the module is running in check mode (module.check_mode)
+ '''
+
+ rconfig = RouteConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ files['destcacert']['value'],
+ files['cacert']['value'],
+ files['cert']['value'],
+ files['key']['value'],
+ params['host'],
+ params['tls_termination'],
+ params['service_name'],
+ params['wildcard_policy'],
+ params['weight'])
+
+ oc_route = OCRoute(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_route.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False,
+ 'results': api_rval['results'],
+ 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_route.exists():
+
+ if check_mode:
+                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}  # noqa: E501
+
+ api_rval = oc_route.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': "absent"} # noqa: E501
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_route.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} # noqa: E501
+
+ # Create it here
+ api_rval = oc_route.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ ########
+ # Update
+ ########
+ if oc_route.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} # noqa: E501
+
+ api_rval = oc_route.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+                # return the updated object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': False, 'results': api_rval, 'state': "present"}
+
+ # catch all
+ return {'failed': True, 'msg': "Unknown State passed"}
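
As a companion to the docstring above, a hedged sketch of the files mapping that run_ansible expects; the keys mirror the RouteConfig call, and 'value' is presumed to carry whichever of the path- or content-supplied certificate the Ansible wrapper resolved:

    # Illustrative only; paths and PEM payloads are placeholders
    files = {'destcacert': {'path': None, 'content': None, 'value': None},
             'cacert': {'path': '/tmp/cacert.pem', 'content': None, 'value': 'PEM...'},
             'cert': {'path': '/tmp/cert.pem', 'content': None, 'value': 'PEM...'},
             'key': {'path': '/tmp/key.pem', 'content': None, 'value': 'PEM...'}}
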
diff --git a/roles/lib_openshift/src/class/oc_version.py b/roles/lib_openshift/src/class/oc_version.py
new file mode 100644
index 000000000..7f8c721d8
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_version.py
@@ -0,0 +1,47 @@
+# flake8: noqa
+# pylint: skip-file
+
+
+# pylint: disable=too-many-instance-attributes
+class OCVersion(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ debug):
+ ''' Constructor for OCVersion '''
+ super(OCVersion, self).__init__(None, config)
+ self.debug = debug
+
+ def get(self):
+ '''get and return version information '''
+
+ results = {}
+
+ version_results = self._version()
+
+ if version_results['returncode'] == 0:
+ filtered_vers = Utils.filter_versions(version_results['results'])
+ custom_vers = Utils.add_custom_versions(filtered_vers)
+
+ results['returncode'] = version_results['returncode']
+ results.update(filtered_vers)
+ results.update(custom_vers)
+
+ return results
+
+ raise OpenShiftCLIError('Problem detecting openshift version.')
+
+ @staticmethod
+ def run_ansible(params):
+ '''run the idempotent ansible code'''
+ oc_version = OCVersion(params['kubeconfig'], params['debug'])
+
+ if params['state'] == 'list':
+
+ #pylint: disable=protected-access
+ result = oc_version.get()
+ return {'state': params['state'],
+ 'results': result,
+ 'changed': False}
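
Assuming Utils.filter_versions and Utils.add_custom_versions behave as defined in lib/base.py below, a successful OCVersion.get() returns a flat dict along these lines; the numbers are only examples and mirror the unit test later in this patch:

    # Approximate shape of OCVersion.get() output (example values)
    # {'returncode': 0,
    #  'oc': 'v3.4.0.39', 'openshift': 'v3.4.0.39', 'kubernetes': 'v1.4.0+776c994',
    #  'oc_numeric': '3.4.0.39', 'oc_short': '3.4',
    #  'kubernetes_numeric': '1.4.0', 'kubernetes_short': '1.4'}
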
diff --git a/roles/lib_openshift/src/doc/edit b/roles/lib_openshift/src/doc/edit
new file mode 100644
index 000000000..212d88f65
--- /dev/null
+++ b/roles/lib_openshift/src/doc/edit
@@ -0,0 +1,116 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_edit
+short_description: Modify and idempotently manage openshift objects.
+description:
+ - Modify openshift objects programmatically.
+options:
+ state:
+ description:
+    - Currently, present is the only supported state.
+ required: true
+ default: present
+ choices: ["present"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object.
+ required: True
+ default: None
+ choices:
+ - bc
+ - buildconfig
+ - configmaps
+ - dc
+ - deploymentconfig
+ - imagestream
+ - imagestreamtag
+ - is
+ - istag
+ - namespace
+ - project
+ - projects
+ - node
+ - ns
+ - persistentvolume
+ - pv
+ - rc
+ - replicationcontroller
+ - routes
+ - scc
+ - secret
+ - securitycontextconstraints
+ - service
+ - svc
+ aliases: []
+ file_name:
+ description:
+    - The name of the file to edit.
+ required: false
+ default: None
+ aliases: []
+ file_format:
+ description:
+ - The format of the file being edited.
+ required: false
+ default: yaml
+ aliases: []
+ content:
+ description:
+ - Content of the file
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ separator:
+ description:
+ - The separator format for the edit.
+ required: false
+ default: '.'
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_edit:
+ kind: rc
+ name: hawkular-cassandra-rc
+ namespace: openshift-infra
+ content:
+ spec.template.spec.containers[0].resources.limits.memory: 512
+ spec.template.spec.containers[0].resources.requests.memory: 256
+'''
diff --git a/roles/lib_openshift/src/doc/generated b/roles/lib_openshift/src/doc/generated
new file mode 100644
index 000000000..b55d18cff
--- /dev/null
+++ b/roles/lib_openshift/src/doc/generated
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
diff --git a/roles/lib_openshift/src/doc/license b/roles/lib_openshift/src/doc/license
new file mode 100644
index 000000000..717bb7f17
--- /dev/null
+++ b/roles/lib_openshift/src/doc/license
@@ -0,0 +1,16 @@
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj
new file mode 100644
index 000000000..e44843eb3
--- /dev/null
+++ b/roles/lib_openshift/src/doc/obj
@@ -0,0 +1,95 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_obj
+short_description: Generic interface to openshift objects
+description:
+ - Manage openshift objects programmatically.
+options:
+ state:
+ description:
+    - State represents whether to create, modify, delete, or list the object.
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ all_namespace:
+ description:
+    - Search in all namespaces for the object.
+ required: false
+ default: false
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object. e.g. dc, bc, svc, route
+ required: True
+ default: None
+ aliases: []
+ files:
+ description:
+    - A list of files provided for the object.
+ required: false
+ default: None
+ aliases: []
+ delete_after:
+ description:
+ - Whether or not to delete the files after processing them.
+ required: false
+ default: false
+ aliases: []
+ content:
+ description:
+ - Content of the object being managed.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ selector:
+ description:
+ - Selector that gets added to the query.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_obj:
+ kind: dc
+ name: router
+ namespace: default
+register: router_output
+'''
diff --git a/roles/lib_openshift/src/doc/route b/roles/lib_openshift/src/doc/route
new file mode 100644
index 000000000..1797d4d33
--- /dev/null
+++ b/roles/lib_openshift/src/doc/route
@@ -0,0 +1,120 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_route
+short_description: Create, modify, and idempotently manage openshift routes.
+description:
+ - Manage openshift route objects programmatically.
+options:
+ state:
+ description:
+    - State represents whether to create, modify, delete, or list the route.
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ tls_termination:
+ description:
+    - The type of TLS termination to use, e.g. reencrypt.
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_path:
+ description:
+ - The path to the dest_cacert
+ required: false
+ default: None
+ aliases: []
+ cacert_path:
+ description:
+ - The path to the cacert
+ required: false
+ default: None
+ aliases: []
+ cert_path:
+ description:
+ - The path to the cert
+ required: false
+ default: None
+ aliases: []
+ key_path:
+ description:
+ - The path to the key
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_content:
+ description:
+ - The dest_cacert content
+ required: false
+ default: None
+ aliases: []
+ cacert_content:
+ description:
+ - The cacert content
+ required: false
+ default: None
+ aliases: []
+ cert_content:
+ description:
+ - The cert content
+ required: false
+ default: None
+ aliases: []
+ service_name:
+ description:
+ - The name of the service that this route points to.
+ required: false
+ default: None
+ aliases: []
+ host:
+ description:
+ - The host that the route will use. e.g. myapp.x.y.z
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Configure certificates for reencrypt route
+ oc_route:
+ name: myapproute
+ namespace: awesomeapp
+    cert_path: "/etc/origin/master/named_certificates/myapp_cert"
+    key_path: "/etc/origin/master/named_certificates/myapp_key"
+    cacert_path: "/etc/origin/master/named_certificates/myapp_cacert"
+ dest_cacert_content: "{{ dest_cacert_content }}"
+ service_name: myapp_php
+ host: myapp.awesomeapp.openshift.com
+ tls_termination: reencrypt
+ run_once: true
+'''
diff --git a/roles/lib_openshift/src/doc/version b/roles/lib_openshift/src/doc/version
new file mode 100644
index 000000000..c0fdd53e7
--- /dev/null
+++ b/roles/lib_openshift/src/doc/version
@@ -0,0 +1,40 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_version
+short_description: Return the current openshift version
+description:
+    - Return the installed openshift version, as reported by `oc version`.
+options:
+ state:
+ description:
+    - Currently, list is the only supported state.
+ required: true
+ default: list
+ choices: ["list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: get oc version
+ oc_version:
+ register: oc_version
+'''
diff --git a/roles/lib_openshift/src/generate.py b/roles/lib_openshift/src/generate.py
new file mode 100755
index 000000000..6daade108
--- /dev/null
+++ b/roles/lib_openshift/src/generate.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+'''
+    Generate the openshift-ansible/roles/lib_openshift/library/ modules.
+'''
+
+import argparse
+import os
+import yaml
+import six
+
+OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
+OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml') # noqa: E501
+LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+
+
+class GenerateAnsibleException(Exception):
+ '''General Exception for generate function'''
+ pass
+
+
+def parse_args():
+ '''parse arguments to generate'''
+ parser = argparse.ArgumentParser(description="Generate ansible modules.")
+ parser.add_argument('--verify', action='store_true', default=False,
+ help='Verify library code matches the generated code.')
+
+ return parser.parse_args()
+
+
+def generate(parts):
+ '''generate the source code for the ansible modules'''
+
+ data = six.StringIO()
+ for fpart in parts:
+ # first line is pylint disable so skip it
+ with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
+ for idx, line in enumerate(pfd):
+ if idx in [0, 1] and 'flake8: noqa' in line or 'pylint: skip-file' in line: # noqa: E501
+ continue
+
+ data.write(line)
+
+ return data
+
+
+def get_sources():
+    '''return the parsed sources.yml as a dict'''
+ return yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
+
+
+def verify():
+ '''verify if the generated code matches the library code'''
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ if not open(fname).read() == data.getvalue():
+ raise GenerateAnsibleException('Generated content does not match for %s' % fname)
+
+
+def main():
+ ''' combine the necessary files to create the ansible module '''
+ args = parse_args()
+ if args.verify:
+ verify()
+
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ with open(fname, 'w') as afd:
+ afd.seek(0)
+ afd.write(data.getvalue())
+
+
+if __name__ == '__main__':
+ main()
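
A brief usage note, assuming the script is run from roles/lib_openshift/src/: it rebuilds the concatenated modules under ../library/ from sources.yml, and --verify raises GenerateAnsibleException when the generated content has drifted:

    ./generate.py            # rewrite ../library/*.py from the parts listed in sources.yml
    ./generate.py --verify   # fail if ../library/ no longer matches the generated output
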
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
new file mode 100644
index 000000000..db5f4e890
--- /dev/null
+++ b/roles/lib_openshift/src/lib/base.py
@@ -0,0 +1,522 @@
+# pylint: skip-file
+# flake8: noqa
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+        '''Turn a list of dicts with path and data keys into a list of created files'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+        ''' return the parsed contents of the resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+        # By default "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
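
OpenShiftCLIConfig.stringify only emits options that are flagged for inclusion and carry a truthy (or integer) value; a sketch with a hypothetical options hash, not tied to any real module:

    # Hypothetical options hash; key names are illustrative
    opts = {'route_name': {'value': 'frontend', 'include': True},
            'service_name': {'value': None, 'include': True},  # skipped: falsy, not an int
            'replicas': {'value': 0, 'include': True}}         # kept: ints pass even at 0
    # OpenShiftCLIConfig('frontend', 'default', kubeconfig, opts).stringify()
    # would yield ['--route-name=frontend', '--replicas=0']
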
diff --git a/roles/lib_openshift/src/lib/import.py b/roles/lib_openshift/src/lib/import.py
new file mode 100644
index 000000000..c2b30e019
--- /dev/null
+++ b/roles/lib_openshift/src/lib/import.py
@@ -0,0 +1,17 @@
+# pylint: skip-file
+# flake8: noqa
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_openshift/src/lib/route.py b/roles/lib_openshift/src/lib/route.py
new file mode 100644
index 000000000..3130e7358
--- /dev/null
+++ b/roles/lib_openshift/src/lib/route.py
@@ -0,0 +1,123 @@
+# pylint: skip-file
+# flake8: noqa
+# noqa: E302,E301
+
+
+# pylint: disable=too-many-instance-attributes
+class RouteConfig(object):
+ ''' Handle route options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ destcacert=None,
+ cacert=None,
+ cert=None,
+ key=None,
+ host=None,
+ tls_termination=None,
+ service_name=None,
+ wildcard_policy=None,
+ weight=None):
+ ''' constructor for handling route options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.host = host
+ self.tls_termination = tls_termination
+ self.destcacert = destcacert
+ self.cacert = cacert
+ self.cert = cert
+ self.key = key
+ self.service_name = service_name
+ self.data = {}
+ self.wildcard_policy = wildcard_policy
+ if wildcard_policy is None:
+ self.wildcard_policy = 'None'
+ self.weight = weight
+ if weight is None:
+ self.weight = 100
+
+ self.create_dict()
+
+ def create_dict(self):
+        ''' build the route definition as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Route'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+ self.data['spec'] = {}
+
+ self.data['spec']['host'] = self.host
+
+ if self.tls_termination:
+ self.data['spec']['tls'] = {}
+
+ if self.tls_termination == 'reencrypt':
+ self.data['spec']['tls']['destinationCACertificate'] = self.destcacert
+ self.data['spec']['tls']['key'] = self.key
+ self.data['spec']['tls']['caCertificate'] = self.cacert
+ self.data['spec']['tls']['certificate'] = self.cert
+ self.data['spec']['tls']['termination'] = self.tls_termination
+
+ self.data['spec']['to'] = {'kind': 'Service',
+ 'name': self.service_name,
+ 'weight': self.weight}
+
+ self.data['spec']['wildcardPolicy'] = self.wildcard_policy
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class Route(Yedit):
+    ''' Class to model an openshift route via Yedit '''
+ wildcard_policy = "spec.wildcardPolicy"
+ host_path = "spec.host"
+ service_path = "spec.to.name"
+ weight_path = "spec.to.weight"
+ cert_path = "spec.tls.certificate"
+ cacert_path = "spec.tls.caCertificate"
+ destcacert_path = "spec.tls.destinationCACertificate"
+ termination_path = "spec.tls.termination"
+ key_path = "spec.tls.key"
+ kind = 'route'
+
+ def __init__(self, content):
+ '''Route constructor'''
+ super(Route, self).__init__(content=content)
+
+ def get_destcacert(self):
+        ''' return destcacert '''
+ return self.get(Route.destcacert_path)
+
+ def get_cert(self):
+ ''' return cert '''
+ return self.get(Route.cert_path)
+
+ def get_key(self):
+ ''' return key '''
+ return self.get(Route.key_path)
+
+ def get_cacert(self):
+ ''' return cacert '''
+ return self.get(Route.cacert_path)
+
+ def get_service(self):
+ ''' return service name '''
+ return self.get(Route.service_path)
+
+ def get_weight(self):
+ ''' return service weight '''
+ return self.get(Route.weight_path)
+
+ def get_termination(self):
+ ''' return tls termination'''
+ return self.get(Route.termination_path)
+
+ def get_host(self):
+ ''' return host '''
+ return self.get(Route.host_path)
+
+ def get_wildcard_policy(self):
+ ''' return wildcardPolicy '''
+ return self.get(Route.wildcard_policy)
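
Putting RouteConfig.create_dict together, an edge-terminated route built with just a name, namespace, host, service and termination would produce a data dict shaped roughly as below; weight and wildcard policy fall back to the constructor defaults, and the certificate fields stay None when no material is supplied:

    # Approximate RouteConfig(...).data for an edge route (placeholder values)
    # {'apiVersion': 'v1', 'kind': 'Route',
    #  'metadata': {'name': 'test', 'namespace': 'default'},
    #  'spec': {'host': 'test.example',
    #           'tls': {'key': None, 'caCertificate': None, 'certificate': None,
    #                   'termination': 'edge'},
    #           'to': {'kind': 'Service', 'name': 'test', 'weight': 100},
    #           'wildcardPolicy': 'None'}}
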
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
new file mode 100644
index 000000000..f1fd558d3
--- /dev/null
+++ b/roles/lib_openshift/src/sources.yml
@@ -0,0 +1,38 @@
+---
+oc_edit.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/edit
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_edit.py
+- ansible/oc_edit.py
+oc_obj.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/obj
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_obj.py
+- ansible/oc_obj.py
+oc_route.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/route
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/route.py
+- class/oc_route.py
+- ansible/oc_route.py
+oc_version.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/version
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_version.py
+- ansible/oc_version.py
diff --git a/roles/lib_openshift/src/test/integration/oc_route.yml b/roles/lib_openshift/src/test/integration/oc_route.yml
new file mode 100755
index 000000000..620d5d5e7
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_route.yml
@@ -0,0 +1,77 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_route.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create route
+ oc_route:
+ name: test
+ namespace: default
+ tls_termination: edge
+ cert_content: testing cert
+ cacert_content: testing cacert
+ key_content: key content
+ service_name: test
+ host: test.example
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.results.results[0]['metadata']['name'] == 'test'"
+ msg: route create failed
+
+ - name: get route
+ oc_route:
+ state: list
+ name: test
+ namespace: default
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.results[0]['metadata']['name'] == 'test'"
+ msg: get route failed
+
+ - name: delete route
+ oc_route:
+ state: absent
+ name: test
+ namespace: default
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.results.returncode == 0"
+ msg: delete route failed
+
+ - name: create route
+ oc_route:
+ name: test
+ namespace: default
+ tls_termination: edge
+ cert_content: testing cert
+ cacert_content: testing cacert
+ key_content: testing key
+ service_name: test
+ host: test.example
+ register: routeout
+ - debug: var=routeout
+
+ - name: create route noop
+ oc_route:
+ name: test
+ namespace: default
+ tls_termination: edge
+ cert_content: testing cert
+ cacert_content: testing cacert
+ key_content: testing key
+ service_name: test
+ host: test.example
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.changed == False"
+ msg: Route create not idempotent
diff --git a/roles/lib_openshift/src/test/integration/oc_version.yml b/roles/lib_openshift/src/test/integration/oc_version.yml
new file mode 100755
index 000000000..52336d8da
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_version.yml
@@ -0,0 +1,17 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_version.yml -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: Get openshift version
+ oc_version:
+ register: versionout
+
+ - debug: var=versionout
+
+ - assert:
+ that:
+ - "'oc_numeric' in versionout.results.keys()"
+ msg: "Did not find 'oc_numeric' in version results."
diff --git a/roles/lib_openshift/src/test/unit/oc_version.py b/roles/lib_openshift/src/test/unit/oc_version.py
new file mode 100755
index 000000000..8d9128187
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/oc_version.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc version
+'''
+# To run
+# python -m unittest oc_version
+#
+# .
+# Ran 1 test in 0.597s
+#
+# OK
+
+import os
+import sys
+import unittest
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_version import OCVersion # noqa: E402
+
+
+# pylint: disable=unused-argument
+def oc_cmd_mock(cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''mock command for openshift_cmd'''
+ version = '''oc v3.4.0.39
+kubernetes v1.4.0+776c994
+features: Basic-Auth GSSAPI Kerberos SPNEGO
+
+Server https://internal.api.opstest.openshift.com
+openshift v3.4.0.39
+kubernetes v1.4.0+776c994
+'''
+ if 'version' in cmd:
+ return {'stderr': None,
+ 'stdout': version,
+ 'returncode': 0,
+ 'results': version,
+ 'cmd': cmd}
+
+
+class OCVersionTest(unittest.TestCase):
+ '''
+ Test class for OCVersion
+ '''
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ self.oc_ver = OCVersion(None, False)
+ self.oc_ver.openshift_cmd = oc_cmd_mock
+
+ def test_get(self):
+ ''' Testing a get '''
+ results = self.oc_ver.get()
+ self.assertEqual(results['oc_short'], '3.4')
+ self.assertEqual(results['oc_numeric'], '3.4.0.39')
+ self.assertEqual(results['kubernetes_numeric'], '1.4.0')
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index fb545c7c8..6a5b40dcc 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -24,7 +24,6 @@
# limitations under the License.
#
-
# pylint: disable=wrong-import-order
import json
import os
@@ -135,6 +134,12 @@ options:
required: false
default: true
aliases: []
+ separator:
+ description:
+ - The separator being used when parsing strings.
+ required: false
+ default: '.'
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
@@ -163,6 +168,7 @@ EXAMPLES = '''
# b:
# c: d
'''
+# noqa: E301,E302
class YeditException(Exception):
@@ -170,6 +176,7 @@ class YeditException(Exception):
pass
+# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
@@ -590,6 +597,48 @@ class Yedit(object):
return (False, self.yaml_dict)
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
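+    # A couple of hedged examples with the default loader: parse_value('3')
+    # yields the int 3 and parse_value('True') the bool True (plain yaml.load),
+    # while parse_value('maybe', 'bool') raises YeditException because the
+    # string is in neither boolean list above.
+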
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
@@ -610,8 +659,8 @@ class Yedit(object):
if module.params['state'] == 'list':
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
@@ -621,8 +670,8 @@ class Yedit(object):
elif module.params['state'] == 'absent':
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
@@ -639,8 +688,8 @@ class Yedit(object):
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
@@ -653,12 +702,13 @@ class Yedit(object):
# we were passed a value; parse it
if module.params['value']:
- value = parse_value(module.params['value'],
- module.params['value_type'])
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
- curr_value = get_curr_value(parse_value(module.params['curr_value']), module.params['curr_value_format']) # noqa: #501
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
@@ -684,48 +734,6 @@ class Yedit(object):
return {'failed': True, 'msg': 'Unkown state passed'}
-def get_curr_value(invalue, val_type):
- '''return the current value'''
- if invalue is None:
- return None
-
- curr_value = invalue
- if val_type == 'yaml':
- curr_value = yaml.load(invalue)
- elif val_type == 'json':
- curr_value = json.loads(invalue)
-
- return curr_value
-
-
-def parse_value(inc_value, vtype=''):
- '''determine value type passed'''
- true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
- 'on', 'On', 'ON', ]
- false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
- 'off', 'Off', 'OFF']
-
- # It came in as a string but you didn't specify value_type as string
- # we will convert to bool if it matches any of the above cases
- if isinstance(inc_value, str) and 'bool' in vtype:
- if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
- elif isinstance(inc_value, bool) and 'str' in vtype:
- inc_value = str(inc_value)
-
- # If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
- try:
- inc_value = yaml.load(inc_value)
- except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
-
- return inc_value
-
-
# pylint: disable=too-many-branches
def main():
''' ansible oc module for secrets '''
@@ -757,7 +765,7 @@ def main():
rval = Yedit.run_ansible(module)
if 'failed' in rval and rval['failed']:
- module.fail_json(msg=rval['msg'])
+ module.fail_json(**rval)
module.exit_json(**rval)
diff --git a/roles/lib_utils/src/ansible/yedit.py b/roles/lib_utils/src/ansible/yedit.py
index a80cd520c..8a1a7c2dc 100644
--- a/roles/lib_utils/src/ansible/yedit.py
+++ b/roles/lib_utils/src/ansible/yedit.py
@@ -2,48 +2,6 @@
# pylint: skip-file
-def get_curr_value(invalue, val_type):
- '''return the current value'''
- if invalue is None:
- return None
-
- curr_value = invalue
- if val_type == 'yaml':
- curr_value = yaml.load(invalue)
- elif val_type == 'json':
- curr_value = json.loads(invalue)
-
- return curr_value
-
-
-def parse_value(inc_value, vtype=''):
- '''determine value type passed'''
- true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
- 'on', 'On', 'ON', ]
- false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
- 'off', 'Off', 'OFF']
-
- # It came in as a string but you didn't specify value_type as string
- # we will convert to bool if it matches any of the above cases
- if isinstance(inc_value, str) and 'bool' in vtype:
- if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
- elif isinstance(inc_value, bool) and 'str' in vtype:
- inc_value = str(inc_value)
-
- # If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
- try:
- inc_value = yaml.load(inc_value)
- except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
-
- return inc_value
-
-
# pylint: disable=too-many-branches
def main():
''' ansible oc module for secrets '''
@@ -75,7 +33,7 @@ def main():
rval = Yedit.run_ansible(module)
if 'failed' in rval and rval['failed']:
- module.fail_json(msg=rval['msg'])
+ module.fail_json(**rval)
module.exit_json(**rval)
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index e110bc11e..b1644f9b2 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -1,11 +1,14 @@
# flake8: noqa
# pylint: skip-file
+# noqa: E301,E302
+
class YeditException(Exception):
''' Exception class for Yedit '''
pass
+# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
@@ -426,6 +429,48 @@ class Yedit(object):
return (False, self.yaml_dict)
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
@@ -446,8 +491,8 @@ class Yedit(object):
if module.params['state'] == 'list':
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
@@ -457,8 +502,8 @@ class Yedit(object):
elif module.params['state'] == 'absent':
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
@@ -475,8 +520,8 @@ class Yedit(object):
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
@@ -489,12 +534,13 @@ class Yedit(object):
# we were passed a value; parse it
if module.params['value']:
- value = parse_value(module.params['value'],
- module.params['value_type'])
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
- curr_value = get_curr_value(parse_value(module.params['curr_value']), module.params['curr_value_format']) # noqa: #501
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
diff --git a/roles/lib_utils/src/doc/generated b/roles/lib_utils/src/doc/generated
new file mode 100644
index 000000000..054780313
--- /dev/null
+++ b/roles/lib_utils/src/doc/generated
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
diff --git a/roles/lib_utils/src/doc/yedit b/roles/lib_utils/src/doc/yedit
index e367a389e..16b44943e 100644
--- a/roles/lib_utils/src/doc/yedit
+++ b/roles/lib_utils/src/doc/yedit
@@ -102,6 +102,12 @@ options:
required: false
default: true
aliases: []
+ separator:
+ description:
+ - The separator being used when parsing strings.
+ required: false
+ default: '.'
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
diff --git a/roles/lib_utils/src/generate.py b/roles/lib_utils/src/generate.py
index f4b46aa91..6daade108 100755
--- a/roles/lib_utils/src/generate.py
+++ b/roles/lib_utils/src/generate.py
@@ -3,42 +3,72 @@
Generate the openshift-ansible/roles/lib_openshift_cli/library/ modules.
'''
+import argparse
import os
import yaml
-
-# pylint: disable=anomalous-backslash-in-string
-GEN_STR = "#!/usr/bin/env python\n" + \
- "# pylint: disable=missing-docstring\n" + \
- "# ___ ___ _ _ ___ ___ _ _____ ___ ___\n" + \
- "# / __| __| \| | __| _ \ /_\_ _| __| \\\n" + \
- "# | (_ | _|| .` | _|| / / _ \| | | _|| |) |\n" + \
- "# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____\n" + \
- "# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|\n" + \
- "# | |) | (_) | | .` | (_) || | | _|| |) | | | |\n" + \
- "# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|\n"
+import six
OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
-OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'generate_sources.yml') # noqa: E501
+OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml') # noqa: E501
+LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+
+
+class GenerateAnsibleException(Exception):
+ '''General Exception for generate function'''
+ pass
+
+
+def parse_args():
+ '''parse arguments to generate'''
+ parser = argparse.ArgumentParser(description="Generate ansible modules.")
+ parser.add_argument('--verify', action='store_true', default=False,
+ help='Verify library code matches the generated code.')
+
+ return parser.parse_args()
+
+
+def generate(parts):
+ '''generate the source code for the ansible modules'''
+
+ data = six.StringIO()
+ for fpart in parts:
+ # first line is pylint disable so skip it
+ with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
+ for idx, line in enumerate(pfd):
+ if idx in [0, 1] and 'flake8: noqa' in line or 'pylint: skip-file' in line: # noqa: E501
+ continue
+
+ data.write(line)
+
+ return data
+
+
+def get_sources():
+    '''return the parsed sources.yml as a dict'''
+ return yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
+
+
+def verify():
+ '''verify if the generated code matches the library code'''
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ if not open(fname).read() == data.getvalue():
+ raise GenerateAnsibleException('Generated content does not match for %s' % fname)
def main():
''' combine the necessary files to create the ansible module '''
+ args = parse_args()
+ if args.verify:
+ verify()
- library = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
- sources = yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
- for fname, parts in sources.items():
- with open(os.path.join(library, fname), 'w') as afd:
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ with open(fname, 'w') as afd:
afd.seek(0)
- afd.write(GEN_STR)
- for fpart in parts:
- with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
- # first line is pylint disable so skip it
- for idx, line in enumerate(pfd):
- if idx in [0, 1] and 'flake8: noqa' in line \
- or 'pylint: skip-file' in line:
- continue
-
- afd.write(line)
+ afd.write(data.getvalue())
if __name__ == '__main__':
diff --git a/roles/lib_utils/src/generate_sources.yml b/roles/lib_utils/src/sources.yml
index 83b21de1b..9cf3a0981 100644
--- a/roles/lib_utils/src/generate_sources.yml
+++ b/roles/lib_utils/src/sources.yml
@@ -1,5 +1,6 @@
---
yedit.py:
+- doc/generated
- doc/license
- class/import.py
- doc/yedit
diff --git a/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig b/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig
new file mode 100644
index 000000000..5541c3dae
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig
@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kube-controller-manager
+ namespace: kube-system
+spec:
+ hostNetwork: true
+ containers:
+ - name: kube-controller-manager
+ image: openshift/kube:v1.0.0
+ command:
+ - /hyperkube
+ - controller-manager
+ - --master=http://127.0.0.1:8080
+ - --leader-elect=true
+ - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
+ - --root-ca-file=/etc/k8s/ssl/my.pem
+ - --my-new-parameter=openshift
+ livenessProbe:
+ httpGet:
+ host: 127.0.0.1
+ path: /healthz
+ port: 10252
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ volumeMounts:
+ - mountPath: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ readOnly: true
+ - mountPath: /etc/ssl/certs
+ name: ssl-certs-host
+ readOnly: 'true'
+ volumes:
+ - hostPath:
+ path: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ - hostPath:
+ path: /usr/share/ca-certificates
+ name: ssl-certs-host
+yedittest: yedittest
+metadata-namespace: openshift-is-awesome
+nonexistingkey:
+- --my-new-parameter=openshift
+a:
+ b:
+ c: d
+e:
+ f:
+ g:
+ h:
+ i:
+ j: k
diff --git a/roles/openshift_builddefaults/vars/main.yml b/roles/openshift_builddefaults/vars/main.yml
index c9ec3b82f..fe6069ea9 100644
--- a/roles/openshift_builddefaults/vars/main.yml
+++ b/roles/openshift_builddefaults/vars/main.yml
@@ -23,7 +23,6 @@ builddefaults_yaml:
imageLabels: "{{ openshift_builddefaults_image_labels | default(None) }}"
nodeSelector: "{{ openshift_builddefaults_nodeselectors | default(None) }}"
annotations: "{{ openshift_builddefaults_annotations | default(None) }}"
- #resources: "{{ openshift.builddefaults.resources | default(None) }}"
resources:
requests:
cpu: "{{ openshift_builddefaults_resources_requests_cpu | default(None) }}"
diff --git a/roles/openshift_buildoverrides/tasks/main.yml b/roles/openshift_buildoverrides/tasks/main.yml
index 82fce1c5b..87d0e6f21 100644
--- a/roles/openshift_buildoverrides/tasks/main.yml
+++ b/roles/openshift_buildoverrides/tasks/main.yml
@@ -1,13 +1,4 @@
---
-#- name: Set buildoverrides
-# openshift_facts:
-# role: buildoverrides
-# local_facts:
-# force_pull: "{{ openshift_buildoverrides_force_pull | default(None) }}"
-# image_labels: "{{ openshift_buildoverrides_image_labels | default(None) }}"
-# nodeselectors: "{{ openshift_buildoverrides_nodeselectors | default(None) }}"
-# annotations: "{{ openshift_buildoverrides_annotations | default(None) }}"
-
- name: Set buildoverrides config structure
openshift_facts:
role: buildoverrides
diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml
index f0f9c255b..cf49a6ebf 100644
--- a/roles/openshift_buildoverrides/vars/main.yml
+++ b/roles/openshift_buildoverrides/vars/main.yml
@@ -1,10 +1,11 @@
---
+force_pull: "{{ openshift_buildoverrides_force_pull | default('') }}"
buildoverrides_yaml:
BuildOverrides:
configuration:
apiVersion: v1
kind: BuildOverridesConfig
- forcePull: "{{ openshift_buildoverrides_force_pull | default('', true) }}"
+ forcePull: "{{ '' if force_pull == '' else force_pull | bool }}"
imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}"
nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}"
annotations: "{{ openshift_buildoverrides_annotations | default(None) }}"
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index e2a12e5ff..e21397170 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -86,7 +86,7 @@
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift_master_hostnames | join(',') }}
+ --hostnames={{ openshift.common.all_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_ca_config_dir }}
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index a88470bdd..327cc004b 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -1,5 +1,4 @@
-OpenShift Certificate Expiration Checker
-========================================
+# OpenShift Certificate Expiration Checker
OpenShift certificate expiration checking. Be warned of certificates
expiring within a configurable window of days, and notified of
@@ -21,8 +20,7 @@ cluster. For best results run `ansible-playbook` with the `-v` option.
-Role Variables
---------------
+# Role Variables
Core variables in this role:
@@ -42,8 +40,64 @@ Optional report/result saving variables in this role:
| `openshift_certificate_expiry_json_results_path` | `/tmp/cert-expiry-report.json` | The full path to save the json report as |
-Example Playbook
-----------------
+# Using this Role
+
+How to use the Certificate Expiration Checking Role.
+
+> **NOTE:** In the examples shown below, ensure you change **HOSTS**
+> to the path of your inventory file.
+
+## Run with ansible-playbook
+
+Run one of the example playbooks using an inventory file
+representative of your existing cluster. Some example playbooks are
+included in this repo, or you can read on below after this example to
+craft your own.
+
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
+```
+
+Using the `easy-mode.yaml` playbook will produce:
+
+* Reports including healthy and unhealthy hosts
+* A JSON report in `/tmp/`
+* A stylized HTML report in `/tmp/`
+
+
+## More Example Playbooks
+
+> **Note:** These Playbooks are available to run directly out of the
+> [examples/playbooks/](examples/playbooks/) directory.
+
+
+This example playbook is great if you just want to **try the role
+out**. It enables HTML and JSON reports. The warning window is set
+very large so you will almost always get results back.
+All certificates (healthy or not) are included in the results:
+
+```yaml
+---
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
+```
+
+> [View This Playbook](examples/playbooks/easy-mode.yaml)
+
+***
Default behavior:
@@ -57,6 +111,16 @@ Default behavior:
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/default.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/default.yaml)
+
+***
+
+
Generate HTML and JSON artifacts in their default paths:
```yaml
@@ -72,6 +136,15 @@ Generate HTML and JSON artifacts in their default paths:
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/html_and_json_default_paths.yaml)
+
+***
+
Change the expiration warning window to 1500 days (good for testing
the module out):
@@ -87,6 +160,15 @@ the module out):
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/longer_warning_period.yaml)
+
+***
+
Change the expiration warning window to 1500 days (good for testing
the module out) and save the results as a JSON file:
@@ -103,9 +185,31 @@ the module out) and save the results as a JSON file:
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/longer-warning-period-json-results.yaml)
+
-JSON Output
------------
+
+# Output Formats
+
+As noted above, there are two ways to format your check report: as a
+`json` file for machine parsing, or as a stylized `html` page for
+easy skimming. Both options are shown below.
+
+## HTML Report
+
+![HTML Expiration Report](examples/cert-expiry-report-html.png)
+
+For an example of the HTML report you can browse, save
+[examples/cert-expiry-report.html](examples/cert-expiry-report.html)
+and then open the file in your browser.
+
+
+## JSON Report
There are two top-level keys in the saved JSON results, `data` and
`summary`.
@@ -122,85 +226,116 @@ certificates:
* expiring within the configured warning window
* already expired
-The example below is abbreviated to save space:
+For an example of the full JSON report, see [examples/cert-expiry-report.json](examples/cert-expiry-report.json).
+
+The example below is abbreviated to save space.
```json
{
- "data": {
- "192.168.124.148": {
- "etcd": [
- {
- "cert_cn": "CN:etcd-signer@1474563722",
- "days_remaining": 350,
- "expiry": "2017-09-22 17:02:25",
- "health": "warning",
- "path": "/etc/etcd/ca.crt"
- },
- ],
- "kubeconfigs": [
- {
- "cert_cn": "O:system:nodes, CN:system:node:m01.example.com",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:08:57",
- "health": "warning",
- "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig"
- },
- {
- "cert_cn": "O:system:cluster-admins, CN:system:admin",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:04:40",
- "health": "warning",
- "path": "/etc/origin/master/admin.kubeconfig"
- }
- ],
- "meta": {
- "checked_at_time": "2016-10-07 15:26:47.608192",
- "show_all": "True",
- "warn_before_date": "2020-11-15 15:26:47.608192",
- "warning_days": 1500
- },
- "ocp_certs": [
- {
- "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:04:39",
- "health": "warning",
- "path": "/etc/origin/master/master.server.crt"
- },
- {
- "cert_cn": "CN:openshift-signer@1474563878",
- "days_remaining": 1810,
- "expiry": "2021-09-21 17:04:38",
- "health": "ok",
- "path": "/etc/origin/node/ca.crt"
- }
- ],
- "registry": [
- {
- "cert_cn": "CN:172.30.101.81, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.101.81, IP Address:172.30.101.81",
- "days_remaining": 728,
- "expiry": "2018-10-05 18:54:29",
- "health": "warning",
- "path": "/api/v1/namespaces/default/secrets/registry-certificates"
- }
- ],
- "router": [
- {
- "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:48:23",
- "health": "warning",
- "path": "/api/v1/namespaces/default/secrets/router-certs"
- }
- ]
+ "data": {
+ "m01.example.com": {
+ "etcd": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc,...",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/etcd.server.crt",
+ "serial": 7,
+ "serial_hex": "0x7"
+ }
+ ],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:m01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
}
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.230920",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.230920",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc,...",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:02",
+ "health": "warning",
+ "path": "/etc/origin/master/master.server.crt",
+ "serial": 4,
+ "serial_hex": "0x4"
+ }
+ ],
+ "registry": [
+ {
+ "cert_cn": "CN:172.30.242.251, DNS:docker-registry-default.router.default.svc.cluster.local,...",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:54",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/registry-certificates",
+ "serial": 13,
+ "serial_hex": "0xd"
+ }
+ ],
+ "router": [
+ {
+ "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:46",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/router-certs",
+ "serial": 5050662940948454653,
+ "serial_hex": "0x46178f2f6b765cfd"
+ }
+ ]
},
- "summary": {
- "warning": 6,
- "expired": 0,
- "total": 7,
- "ok": 1
+ "n01.example.com": {
+ "etcd": [],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:n01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:n01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.217103",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.217103",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:192.168.124.11, DNS:n01.example.com, DNS:192.168.124.11, IP Address:192.168.124.11",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:29",
+ "health": "warning",
+ "path": "/etc/origin/node/server.crt",
+ "serial": 12,
+ "serial_hex": "0xc"
+ }
+ ],
+ "registry": [],
+ "router": []
}
+ },
+ "summary": {
+ "expired": 0,
+ "ok": 3,
+ "total": 15,
+ "warning": 12
+ }
}
```
@@ -233,24 +368,17 @@ $ jq '.summary.warning,.summary.expired' /tmp/cert-expiry-report.json
```
-Requirements
-------------
-
+# Requirements
* None
-Dependencies
-------------
-
+# Dependencies
* None
-License
--------
-
+# License
Apache License, Version 2.0
-Author Information
-------------------
+# Author Information
Tim Bielawa (tbielawa@redhat.com)
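
For readers scripting against the saved results, here is a small sketch that walks the JSON structure documented above; it assumes the role's default report path and is only illustrative.

```python
# Summarize a saved cert-expiry JSON report like the example above.
import json

with open('/tmp/cert-expiry-report.json') as f:
    report = json.load(f)

print('expired=%(expired)s warning=%(warning)s ok=%(ok)s total=%(total)s'
      % report['summary'])

for host, kinds in report['data'].items():
    for kind, certs in kinds.items():
        if kind == 'meta':            # per-host check metadata, not a cert list
            continue
        for cert in certs:
            if cert['health'] != 'ok':
                print('%s: %s expires in %s days (%s)'
                      % (host, kind, cert['days_remaining'], cert['path']))
```
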
diff --git a/roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png b/roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png
new file mode 100644
index 000000000..799131659
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png
Binary files differ
diff --git a/roles/openshift_certificate_expiry/examples/cert-expiry-report.html b/roles/openshift_certificate_expiry/examples/cert-expiry-report.html
new file mode 100644
index 000000000..db03a5060
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/cert-expiry-report.html
@@ -0,0 +1,396 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="UTF-8" />
+ <title>OCP Certificate Expiry Report</title>
+ <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" />
+ <link href="https://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,700" rel="stylesheet" />
+ <style type="text/css">
+ body {
+ font-family: 'Source Sans Pro', sans-serif;
+ margin-left: 50px;
+ margin-right: 50px;
+ margin-bottom: 20px;
+ padding-top: 70px;
+ }
+ table {
+ border-collapse: collapse;
+ margin-bottom: 20px;
+ }
+ table, th, td {
+ border: 1px solid black;
+ }
+ th, td {
+ padding: 5px;
+ }
+ .cert-kind {
+ margin-top: 5px;
+ margin-bottom: 5px;
+ }
+ footer {
+ font-size: small;
+ text-align: center;
+ }
+ tr.odd {
+ background-color: #f2f2f2;
+ }
+ </style>
+ </head>
+ <body>
+ <nav class="navbar navbar-default navbar-fixed-top">
+ <div class="container-fluid">
+ <div class="navbar-header">
+ <a class="navbar-brand" href="#">OCP Certificate Expiry Report</a>
+ </div>
+ <div class="collapse navbar-collapse">
+ <p class="navbar-text navbar-right">
+ <button>
+ <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Redeploying Certificates
+ </a>
+ </button>
+ <button>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Expiry Role Documentation
+ </a>
+ </button>
+ </p>
+ </div>
+ </div>
+ </nav>
+
+ <h1>m01.example.com</h1>
+
+ <p>
+ Checked 12 total certificates. Expired/Warning/OK: 0/10/2. Warning window: 1500 days
+ </p>
+ <ul>
+ <li><b>Expirations checked at:</b> 2017-01-17 10:36:25.230920</li>
+ <li><b>Warn after date:</b> 2021-02-25 10:36:25.230920</li>
+ </ul>
+
+ <table border="1" width="100%">
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">ocp_certs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148</td>
+ <td><code>int(4)/hex(0x4)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:02</td>
+ <td>/etc/origin/master/master.server.crt</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:192.168.124.148, DNS:m01.example.com, DNS:192.168.124.148, IP Address:192.168.124.148</td>
+ <td><code>int(12)/hex(0xc)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:29</td>
+ <td>/etc/origin/node/server.crt</td>
+ </tr>
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-ok"></i></td>
+ <td style="width:33%">CN:openshift-signer@1483981200</td>
+ <td><code>int(1)/hex(0x1)</code></td>
+ <td>ok</td>
+ <td>1817</td>
+ <td>2022-01-08 17:00:01</td>
+ <td>/etc/origin/master/ca.crt</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-ok"></i></td>
+ <td style="width:33%">CN:openshift-signer@1483981200</td>
+ <td><code>int(1)/hex(0x1)</code></td>
+ <td>ok</td>
+ <td>1817</td>
+ <td>2022-01-08 17:00:01</td>
+ <td>/etc/origin/node/ca.crt</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">etcd</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148</td>
+ <td><code>int(7)/hex(0x7)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/etcd.server.crt</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">kubeconfigs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:nodes, CN:system:node:m01.example.com</td>
+ <td><code>int(11)/hex(0xb)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:28</td>
+ <td>/etc/origin/node/system:node:m01.example.com.kubeconfig</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:cluster-admins, CN:system:admin</td>
+ <td><code>int(8)/hex(0x8)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/admin.kubeconfig</td>
+ </tr>
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:masters, CN:system:openshift-master</td>
+ <td><code>int(3)/hex(0x3)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:02</td>
+ <td>/etc/origin/master/openshift-master.kubeconfig</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:routers, CN:system:openshift-router</td>
+ <td><code>int(9)/hex(0x9)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/openshift-router.kubeconfig</td>
+ </tr>
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:registries, CN:system:openshift-registry</td>
+ <td><code>int(10)/hex(0xa)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/openshift-registry.kubeconfig</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">router</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local</td>
+ <td><code>int(5050662940948454653)/hex(0x46178f2f6b765cfd)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:05:46</td>
+ <td>/api/v1/namespaces/default/secrets/router-certs</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">registry</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:172.30.242.251, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.242.251, IP Address:172.30.242.251</td>
+ <td><code>int(13)/hex(0xd)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:05:54</td>
+ <td>/api/v1/namespaces/default/secrets/registry-certificates</td>
+ </tr>
+ </table>
+ <hr />
+ <h1>n01.example.com</h1>
+
+ <p>
+ Checked 3 total certificates. Expired/Warning/OK: 0/2/1. Warning window: 1500 days
+ </p>
+ <ul>
+ <li><b>Expirations checked at:</b> 2017-01-17 10:36:25.217103</li>
+ <li><b>Warn after date:</b> 2021-02-25 10:36:25.217103</li>
+ </ul>
+
+ <table border="1" width="100%">
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">ocp_certs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:192.168.124.11, DNS:n01.example.com, DNS:192.168.124.11, IP Address:192.168.124.11</td>
+ <td><code>int(12)/hex(0xc)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:29</td>
+ <td>/etc/origin/node/server.crt</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-ok"></i></td>
+ <td style="width:33%">CN:openshift-signer@1483981200</td>
+ <td><code>int(1)/hex(0x1)</code></td>
+ <td>ok</td>
+ <td>1817</td>
+ <td>2022-01-08 17:00:01</td>
+ <td>/etc/origin/node/ca.crt</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">etcd</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">kubeconfigs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:nodes, CN:system:node:n01.example.com</td>
+ <td><code>int(11)/hex(0xb)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:28</td>
+ <td>/etc/origin/node/system:node:n01.example.com.kubeconfig</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">router</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">registry</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+ </table>
+ <hr />
+
+ <footer>
+ <p>
+ Expiration report generated by
+ the <a href="https://github.com/openshift/openshift-ansible"
+ target="_blank">openshift-ansible</a>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank">certificate expiry</a> role.
+ </p>
+ <p>
+ Status icons from bootstrap/glyphicon
+ </p>
+ </footer>
+ </body>
+</html>
diff --git a/roles/openshift_certificate_expiry/examples/cert-expiry-report.json b/roles/openshift_certificate_expiry/examples/cert-expiry-report.json
new file mode 100644
index 000000000..8206e2842
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/cert-expiry-report.json
@@ -0,0 +1,178 @@
+{
+ "data": {
+ "m01.example.com": {
+ "etcd": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/etcd.server.crt",
+ "serial": 7,
+ "serial_hex": "0x7"
+ }
+ ],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:m01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ },
+ {
+ "cert_cn": "O:system:cluster-admins, CN:system:admin",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/admin.kubeconfig",
+ "serial": 8,
+ "serial_hex": "0x8"
+ },
+ {
+ "cert_cn": "O:system:masters, CN:system:openshift-master",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:02",
+ "health": "warning",
+ "path": "/etc/origin/master/openshift-master.kubeconfig",
+ "serial": 3,
+ "serial_hex": "0x3"
+ },
+ {
+ "cert_cn": "O:system:routers, CN:system:openshift-router",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/openshift-router.kubeconfig",
+ "serial": 9,
+ "serial_hex": "0x9"
+ },
+ {
+ "cert_cn": "O:system:registries, CN:system:openshift-registry",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/openshift-registry.kubeconfig",
+ "serial": 10,
+ "serial_hex": "0xa"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.230920",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.230920",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:02",
+ "health": "warning",
+ "path": "/etc/origin/master/master.server.crt",
+ "serial": 4,
+ "serial_hex": "0x4"
+ },
+ {
+ "cert_cn": "CN:192.168.124.148, DNS:m01.example.com, DNS:192.168.124.148, IP Address:192.168.124.148",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:29",
+ "health": "warning",
+ "path": "/etc/origin/node/server.crt",
+ "serial": 12,
+ "serial_hex": "0xc"
+ },
+ {
+ "cert_cn": "CN:openshift-signer@1483981200",
+ "days_remaining": 1817,
+ "expiry": "2022-01-08 17:00:01",
+ "health": "ok",
+ "path": "/etc/origin/master/ca.crt",
+ "serial": 1,
+ "serial_hex": "0x1"
+ },
+ {
+ "cert_cn": "CN:openshift-signer@1483981200",
+ "days_remaining": 1817,
+ "expiry": "2022-01-08 17:00:01",
+ "health": "ok",
+ "path": "/etc/origin/node/ca.crt",
+ "serial": 1,
+ "serial_hex": "0x1"
+ }
+ ],
+ "registry": [
+ {
+ "cert_cn": "CN:172.30.242.251, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.242.251, IP Address:172.30.242.251",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:54",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/registry-certificates",
+ "serial": 13,
+ "serial_hex": "0xd"
+ }
+ ],
+ "router": [
+ {
+ "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:46",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/router-certs",
+ "serial": 5050662940948454653,
+ "serial_hex": "0x46178f2f6b765cfd"
+ }
+ ]
+ },
+ "n01.example.com": {
+ "etcd": [],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:n01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:n01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.217103",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.217103",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:192.168.124.11, DNS:n01.example.com, DNS:192.168.124.11, IP Address:192.168.124.11",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:29",
+ "health": "warning",
+ "path": "/etc/origin/node/server.crt",
+ "serial": 12,
+ "serial_hex": "0xc"
+ },
+ {
+ "cert_cn": "CN:openshift-signer@1483981200",
+ "days_remaining": 1817,
+ "expiry": "2022-01-08 17:00:01",
+ "health": "ok",
+ "path": "/etc/origin/node/ca.crt",
+ "serial": 1,
+ "serial_hex": "0x1"
+ }
+ ],
+ "registry": [],
+ "router": []
+ }
+ },
+ "summary": {
+ "expired": 0,
+ "ok": 3,
+ "total": 15,
+ "warning": 12
+ }
+}
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/default.yaml b/roles/openshift_certificate_expiry/examples/playbooks/default.yaml
new file mode 100644
index 000000000..630135cae
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/default.yaml
@@ -0,0 +1,10 @@
+---
+# Default behavior; you will need to run ansible with the
+# -v option to see report results:
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml b/roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
new file mode 100644
index 000000000..d0209426f
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
@@ -0,0 +1,21 @@
+---
+# This example playbook is great if you're just wanting to try the
+# role out.
+#
+# This example enables HTML and JSON reports
+#
+# The warning window is set very large so you will almost always get results back
+#
+# All certificates (healthy or not) are included in the results
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml b/roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml
new file mode 100644
index 000000000..d80cb6ff4
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml
@@ -0,0 +1,12 @@
+---
+# Generate HTML and JSON artifacts in their default paths:
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml b/roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml
new file mode 100644
index 000000000..87a0f3be4
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml
@@ -0,0 +1,13 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out) and save the results as a JSON file:
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml b/roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml
new file mode 100644
index 000000000..960457c4b
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml
@@ -0,0 +1,12 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out):
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index a474b36b0..85671b164 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -122,6 +122,8 @@ A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certif
cert_loaded = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, _cert_string)
+ cert_serial = cert_loaded.get_serial_number()
+
######################################################################
# Read all possible names from the cert
cert_subjects = []
@@ -178,7 +180,7 @@ A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certif
time_remaining = cert_expiry_date - now
- return (cert_subject, cert_expiry_date, time_remaining)
+ return (cert_subject, cert_expiry_date, time_remaining, cert_serial)
def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
@@ -210,6 +212,7 @@ Return:
cert_meta['health'] = 'ok'
cert_meta['expiry'] = expiry_str
+ cert_meta['serial_hex'] = hex(int(cert_meta['serial']))
cert_list.append(cert_meta)
return cert_list
@@ -373,7 +376,10 @@ an OpenShift Container Platform cluster
for _, v in cert_meta.items():
with open(v, 'r') as fp:
cert = fp.read()
- cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now)
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(cert, now)
expire_check_result = {
'cert_cn': cert_subject,
@@ -381,6 +387,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)
@@ -420,7 +427,8 @@ an OpenShift Container Platform cluster
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -428,6 +436,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
@@ -448,7 +457,8 @@ an OpenShift Container Platform cluster
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -456,6 +466,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
@@ -500,7 +511,8 @@ an OpenShift Container Platform cluster
c = fp.read()
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(c, now)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now)
expire_check_result = {
'cert_cn': cert_subject,
@@ -508,6 +520,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
@@ -537,7 +550,8 @@ an OpenShift Container Platform cluster
with open(etcd_cert, 'r') as etcd_fp:
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(etcd_fp.read(), now)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(etcd_fp.read(), now)
expire_check_result = {
'cert_cn': cert_subject,
@@ -545,6 +559,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
@@ -581,7 +596,8 @@ an OpenShift Container Platform cluster
else:
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(router_c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(router_c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -589,6 +605,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)
@@ -610,7 +627,8 @@ an OpenShift Container Platform cluster
else:
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(registry_c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -618,6 +636,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)
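
The serial handling added above boils down to two pyOpenSSL calls; pulled out of the module for illustration (the certificate path is only an example, any PEM file works):

```python
# Read a certificate's serial the same way the module now does.
import OpenSSL.crypto

with open('/etc/origin/master/ca.crt') as fp:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, fp.read())

serial = cert.get_serial_number()
# Matches the "int(N)/hex(0xN)" Serial column in the HTML report.
print('int(%d)/hex(%s)' % (serial, hex(serial)))
```
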
diff --git a/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2
index b05110336..1d4bb24e9 100644
--- a/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2
+++ b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2
@@ -45,11 +45,20 @@
</div>
<div class="collapse navbar-collapse">
<p class="navbar-text navbar-right">
- <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html"
- target="_blank"
- class="navbar-link">
- <i class="glyphicon glyphicon-book"></i> Redeploying Certificates
- </a>
+ <button>
+ <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Redeploying Certificates
+ </a>
+ </button>
+ <button>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Expiry Role Documentation
+ </a>
+ </button>
</p>
</div>
</div>
@@ -71,12 +80,13 @@
{# These are hard-coded right now, but should be grabbed dynamically from the registered results #}
{%- for kind in ['ocp_certs', 'etcd', 'kubeconfigs', 'router', 'registry'] -%}
<tr>
- <th colspan="6" style="text-align:center"><h2 class="cert-kind">{{ kind }}</h2></th>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">{{ kind }}</h2></th>
</tr>
<tr>
<th>&nbsp;</th>
<th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
<th>Health</th>
<th>Days Remaining</th>
<th>Expiration Date</th>
@@ -98,6 +108,7 @@
<tr class="{{ loop.cycle('odd', 'even') }}">
<td style="text-align:center"><i class="{{ health_icon }}"></i></td>
<td style="width:33%">{{ v.cert_cn }}</td>
+ <td><code>int({{ v.serial }})/hex({{ v.serial_hex }})</code></td>
<td>{{ v.health }}</td>
<td>{{ v.days_remaining }}</td>
<td>{{ v.expiry }}</td>
@@ -114,7 +125,11 @@
<footer>
<p>
- Expiration report generated by <a href="https://github.com/openshift/openshift-ansible" target="_blank">openshift-ansible</a>
+ Expiration report generated by
+ the <a href="https://github.com/openshift/openshift-ansible"
+ target="_blank">openshift-ansible</a>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank">certificate expiry</a> role.
</p>
<p>
Status icons from bootstrap/glyphicon
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 613c237a3..049ceffe0 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -9,6 +9,7 @@
additional_registries: "{{ openshift_docker_additional_registries | default(None) }}"
blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}"
insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}"
+ selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}"
log_driver: "{{ openshift_docker_log_driver | default(None) }}"
log_options: "{{ openshift_docker_log_options | default(None) }}"
options: "{{ openshift_docker_options | default(None) }}"
@@ -23,6 +24,7 @@
| default(omit) }}"
docker_insecure_registries: "{{ openshift.docker.insecure_registries
| default(omit) }}"
+ docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}"
docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}"
docker_log_options: "{{ openshift.docker.log_options | default(omit) }}"
docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
index 62ccc5b7f..ab1c85b7e 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
@@ -98,14 +98,6 @@
},
"env": [
{
- "name": "OPENSHIFT_ENABLE_OAUTH",
- "value": "${ENABLE_OAUTH}"
- },
- {
- "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
- "value": "true"
- },
- {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -245,12 +237,6 @@
"value": "jenkins-jnlp"
},
{
- "name": "ENABLE_OAUTH",
- "displayName": "Enable OAuth in Jenkins",
- "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
- "value": "true"
- },
- {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
index 50c4ad566..87c439ad2 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
@@ -115,14 +115,6 @@
},
"env": [
{
- "name": "OPENSHIFT_ENABLE_OAUTH",
- "value": "${ENABLE_OAUTH}"
- },
- {
- "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
- "value": "true"
- },
- {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -262,12 +254,6 @@
"value": "jenkins-jnlp"
},
{
- "name": "ENABLE_OAUTH",
- "displayName": "Enable OAuth in Jenkins",
- "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
- "value": "true"
- },
- {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 10121f82a..c99452062 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1690,9 +1690,38 @@ def set_builddefaults_facts(facts):
if 'admission_plugin_config' not in facts['master']:
facts['master']['admission_plugin_config'] = dict()
facts['master']['admission_plugin_config'].update(builddefaults['config'])
+ # if the user didn't actually provide proxy values, delete the proxy env variable defaults.
+ delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
+
return facts
+def delete_empty_keys(keylist):
+ """ Delete dictionary elements from keylist where "value" is empty.
+
+ Args:
+ keylist(list): A list of builddefault configuration envs.
+
+ Returns:
+ none
+
+ Example:
+ keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
+ {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
+ {'name': 'NO_PROXY', 'value': ''}]
+
+ After calling delete_empty_keys the provided list is modified to become:
+
+ [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
+ {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
+ """
+ count = 0
+ for i in range(0, len(keylist)):
+ if len(keylist[i - count]['value']) == 0:
+ del keylist[i - count]
+ count += 1
+
+
def set_buildoverrides_facts(facts):
""" Set build overrides
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 2cc2c48ee..9b71dc676 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -6,6 +6,9 @@ This role is used for installing the Aggregated Logging stack. It should be run
a single host, it will create any missing certificates and API objects that the current
[logging deployer](https://github.com/openshift/origin-aggregated-logging/tree/master/deployer) does.
+This role requires that the control host it is run on has Java installed (it is used to generate
+the JKS keystores for Elasticsearch) and openssl available to sign certificates.
+
As part of the installation, it is recommended that you add the Fluentd node selector label
to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels).
diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh
index 995ec0b98..9fe557f83 100644
--- a/roles/openshift_logging/files/generate-jks.sh
+++ b/roles/openshift_logging/files/generate-jks.sh
@@ -1,6 +1,10 @@
#! /bin/sh
set -ex
+function usage() {
+ echo Usage: `basename $0` cert_directory [logging_namespace] 1>&2
+}
+
function generate_JKS_chain() {
dir=${SCRATCH_DIR:-_output}
ADD_OID=$1
@@ -147,8 +151,14 @@ function createTruststore() {
-noprompt -alias sig-ca
}
-dir="$CERT_DIR"
+if [ $# -lt 1 ]; then
+ usage
+ exit 1
+fi
+
+dir=$1
SCRATCH_DIR=$dir
+PROJECT=${2:-logging}
if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then
generate_JKS_client_cert "system.admin"
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 8bbfdf7bf..64bc33435 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -105,9 +105,9 @@ class OpenshiftLoggingFacts(OCBaseCommand):
def add_facts_for(self, comp, kind, name=None, facts=None):
''' Add facts for the provided kind '''
- if comp in self.facts is False:
+ if comp not in self.facts:
self.facts[comp] = dict()
- if kind in self.facts[comp] is False:
+ if kind not in self.facts[comp]:
self.facts[comp][kind] = dict()
if name:
self.facts[comp][kind][name] = facts
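
The bug fixed here is a Python comparison-chaining gotcha and is worth a tiny illustration:

```python
# `comp in facts is False` chains to (comp in facts) and (facts is False),
# which is always False for a dict -- so the initialization branch never ran.
facts = {}
comp = 'elasticsearch'

print(comp in facts is False)   # False, even though comp is missing
print(comp not in facts)        # True: the intended membership test
```
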
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index e16071e46..740e490e1 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -85,133 +85,15 @@
loop_control:
loop_var: node_name
-- name: Check for jks-generator service account
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get serviceaccount/jks-generator --no-headers -n {{openshift_logging_namespace}}
- register: serviceaccount_result
- ignore_errors: yes
- when: not ansible_check_mode
- changed_when: no
-
-- name: Create jks-generator service account
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create serviceaccount jks-generator -n {{openshift_logging_namespace}}
- when: not ansible_check_mode and "not found" in serviceaccount_result.stderr
-
-- name: Check for hostmount-anyuid scc entry
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}'
- register: scc_result
- when: not ansible_check_mode
- changed_when: no
-
-- name: Add to hostmount-anyuid scc
- command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user hostmount-anyuid -z jks-generator -n {{openshift_logging_namespace}}
- when:
- - not ansible_check_mode
- - scc_result.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:jks-generator") == -1
-
-- name: Copy JKS generation script
- copy:
- src: generate-jks.sh
- dest: "{{generated_certs_dir}}/generate-jks.sh"
- check_mode: no
+- name: Creating necessary JKS certs
+ include: generate_jks.yaml
-- name: Generate JKS pod template
- template:
- src: jks_pod.j2
- dest: "{{mktemp.stdout}}/jks_pod.yaml"
- check_mode: no
- changed_when: no
-
-# check if pod generated files exist -- if they all do don't run the pod
-- name: Checking for elasticsearch.jks
- stat: path="{{generated_certs_dir}}/elasticsearch.jks"
- register: elasticsearch_jks
- check_mode: no
-
-- name: Checking for logging-es.jks
- stat: path="{{generated_certs_dir}}/logging-es.jks"
- register: logging_es_jks
- check_mode: no
-
-- name: Checking for system.admin.jks
- stat: path="{{generated_certs_dir}}/system.admin.jks"
- register: system_admin_jks
- check_mode: no
-
-- name: Checking for truststore.jks
- stat: path="{{generated_certs_dir}}/truststore.jks"
- register: truststore_jks
- check_mode: no
-
-- name: create JKS generation pod
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name
- register: podoutput
- check_mode: no
- when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{podoutput.stdout}} -o jsonpath='{.status.phase}' -n {{openshift_logging_namespace}}
- register: result
- until: result.stdout.find("Succeeded") != -1
- retries: 5
- delay: 10
- changed_when: no
- when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
-
-# check for secret/logging-kibana-proxy
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.oauth-secret}'
- register: kibana_secret_oauth_check
- ignore_errors: yes
- changed_when: no
- check_mode: no
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.session-secret}'
- register: kibana_secret_session_check
- ignore_errors: yes
- changed_when: no
- check_mode: no
-
-# check for oauthclient secret
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get oauthclient/kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.secret}'
- register: oauth_secret_check
- ignore_errors: yes
- changed_when: no
- check_mode: no
-
-# set or generate as needed
+# TODO: make idempotent
- name: Generate proxy session
set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}}
check_mode: no
- when:
- - kibana_secret_session_check.stdout is not defined or kibana_secret_session_check.stdout == ''
-
-- name: Generate proxy session
- set_fact: session_secret={{kibana_secret_session_check.stdout | b64decode }}
- check_mode: no
- when:
- - kibana_secret_session_check.stdout is defined
- - kibana_secret_session_check.stdout != ''
+# TODO: make idempotent
- name: Generate oauth client secret
set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}
check_mode: no
- when: kibana_secret_oauth_check.stdout is not defined or kibana_secret_oauth_check.stdout == ''
- or oauth_secret_check.stdout is not defined or oauth_secret_check.stdout == ''
- or kibana_secret_oauth_check.stdout | b64decode != oauth_secret_check.stdout
-
-- name: Generate oauth client secret
- set_fact: oauth_secret={{kibana_secret_oauth_check.stdout | b64decode}}
- check_mode: no
- when:
- - kibana_secret_oauth_check is defined
- - kibana_secret_oauth_check.stdout != ''
- - oauth_secret_check.stdout is defined
- - oauth_secret_check.stdout != ''
- - kibana_secret_oauth_check.stdout | b64decode == oauth_secret_check.stdout
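
The session and oauth client secrets above come from the repo's `random_word` filter, which is defined elsewhere and not shown in this diff; a rough Python stand-in for what that generation does:

```python
# Illustrative stand-in for the random_word-based secret generation above.
import random
import string

def random_word(source, length):
    """Return `length` characters chosen at random from `source`."""
    rng = random.SystemRandom()   # prefer a cryptographically secure source
    return ''.join(rng.choice(source) for _ in range(length))

chars = string.ascii_letters + string.digits
session_secret = random_word(chars, 200)
oauth_secret = random_word(chars, 64)
print(len(session_secret), len(oauth_secret))   # 200 64
```
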
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
index b24a7c342..8fcf517ad 100644
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -49,7 +49,7 @@
- copy:
content: "{{curator_config_contents}}"
dest: "{{mktemp.stdout}}/curator.yml"
- when: curator_config_contenets is defined
+ when: curator_config_contents is defined
changed_when: no
- command: >
diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml
new file mode 100644
index 000000000..c6e2ccbc0
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_jks.yaml
@@ -0,0 +1,98 @@
+---
+# check if the JKS files were already generated -- if they all exist, skip regenerating them
+- name: Checking for elasticsearch.jks
+ stat: path="{{generated_certs_dir}}/elasticsearch.jks"
+ register: elasticsearch_jks
+ check_mode: no
+
+- name: Checking for logging-es.jks
+ stat: path="{{generated_certs_dir}}/logging-es.jks"
+ register: logging_es_jks
+ check_mode: no
+
+- name: Checking for system.admin.jks
+ stat: path="{{generated_certs_dir}}/system.admin.jks"
+ register: system_admin_jks
+ check_mode: no
+
+- name: Checking for truststore.jks
+ stat: path="{{generated_certs_dir}}/truststore.jks"
+ register: truststore_jks
+ check_mode: no
+
+- name: Create temp directory for doing work in
+ local_action: command mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: local_tmp
+ changed_when: False
+ check_mode: no
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+ local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r"
+ when: elasticsearch_jks.stat.exists
+ changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+ local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r"
+ when: logging_es_jks.stat.exists
+ changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+ local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r"
+ when: system_admin_jks.stat.exists
+ changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+ local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r"
+ when: truststore_jks.stat.exists
+ changed_when: False
+
+- name: Pulling down signing items from host
+ fetch:
+ src: "{{generated_certs_dir}}/{{item}}"
+ dest: "{{local_tmp.stdout}}/{{item}}"
+ flat: yes
+ with_items:
+ - ca.crt
+ - ca.key
+ - ca.serial.txt
+ - ca.crl.srl
+ - ca.db
+ when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+- local_action: template src=signing.conf.j2 dest={{local_tmp.stdout}}/signing.conf
+ vars:
+ - top_dir: "{{local_tmp.stdout}}"
+ when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+- name: Run JKS generation script
+ local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
+ check_mode: no
+ when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+ copy:
+ src: "{{local_tmp.stdout}}/elasticsearch.jks"
+ dest: "{{generated_certs_dir}}/elasticsearch.jks"
+ when: not elasticsearch_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+ copy:
+ src: "{{local_tmp.stdout}}/logging-es.jks"
+ dest: "{{generated_certs_dir}}/logging-es.jks"
+ when: not logging_es_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+ copy:
+ src: "{{local_tmp.stdout}}/system.admin.jks"
+ dest: "{{generated_certs_dir}}/system.admin.jks"
+ when: not system_admin_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+ copy:
+ src: "{{local_tmp.stdout}}/truststore.jks"
+ dest: "{{generated_certs_dir}}/truststore.jks"
+ when: not truststore_jks.stat.exists
+
+- name: Cleaning up temp dir
+ local_action: file path="{{local_tmp.stdout}}" state=absent
+ changed_when: False
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index af03e9371..a9699adb8 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -23,23 +23,30 @@
loop_control:
loop_var: install_component
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+ register: object_def_files
+ changed_when: no
+
+- slurp: src={{item}}
+ register: object_defs
+ with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
+ changed_when: no
+
- name: Create objects
include: oc_apply.yaml
vars:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- namespace: "{{ openshift_logging_namespace }}"
- - file_name: "{{ file }}"
- - file_content: "{{ lookup('file', file) | from_yaml }}"
- with_fileglob:
- - "{{ mktemp.stdout }}/templates/*.yaml"
+ - file_name: "{{ file.source }}"
+ - file_content: "{{ file.content | b64decode | from_yaml }}"
+ with_items: "{{ object_defs.results }}"
loop_control:
loop_var: file
when: not ansible_check_mode
- name: Printing out objects to create
- debug: msg="{{lookup('file', file)|quote}}"
- with_fileglob:
- - "{{mktemp.stdout}}/templates/*.yaml"
+ debug: msg={{file.content | b64decode }}
+ with_items: "{{ object_defs.results }}"
loop_control:
loop_var: file
when: ansible_check_mode
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
index aecb5d81b..bd5073381 100644
--- a/roles/openshift_logging/tasks/label_node.yaml
+++ b/roles/openshift_logging/tasks/label_node.yaml
@@ -1,11 +1,34 @@
---
- command: >
{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
+ -o jsonpath='{.metadata.labels}'
+ register: node_labels
+ when: not ansible_check_mode
+ changed_when: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}}
+ register: label_result
+ failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+ when:
+ - value is defined
+ - node_labels.stdout is defined
+ - label not in node_labels.stdout
+ - unlabel is not defined or not unlabel
+ - not ansible_check_mode
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
-o jsonpath='{.metadata.labels.{{ label }}}'
register: label_value
- failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr
- when: not ansible_check_mode
+ ignore_errors: yes
changed_when: no
+ when:
+ - value is defined
+ - node_labels.stdout is defined
+ - label in node_labels.stdout
+ - unlabel is not defined or not unlabel
+ - not ansible_check_mode
- command: >
{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index c4ec1b255..4c718805e 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -3,7 +3,6 @@
msg: Only one Fluentd nodeselector key pair should be provided
when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
-
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
register: mktemp
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index e9b7de330..4620dd877 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -30,7 +30,6 @@
| oo_collect(attribute='stat.exists')
| list)) }}"
-
- name: Ensure the generated_configs directory present
file:
path: "{{ openshift_master_generated_config_dir }}"
@@ -39,30 +38,50 @@
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
-- file:
- src: "{{ openshift_master_config_dir }}/{{ item }}"
- dest: "{{ openshift_master_generated_config_dir }}/{{ item }}"
- state: hard
- with_items:
- - ca.crt
- - ca.key
- - ca.serial.txt
- when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
- delegate_to: "{{ openshift_ca_host }}"
-
-- name: Create the master certificates if they do not already exist
+- name: Create the master server certificate
command: >
- {{ openshift.common.client_binary }} adm create-master-certs
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
- --cert-dir={{ openshift_master_generated_config_dir }}
+ --hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }}
+ --cert={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.crt
+ --key={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.key
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
--overwrite=false
- when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
+ | difference([openshift_ca_host])}}"
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Generate the master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --certificate-authority={{ openshift_ca_cert }}
+ --client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
+ --groups=system:masters,system:openshift-master
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ args:
+ creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
+ | difference([openshift_ca_host])}}"
delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
- file:
src: "{{ openshift_master_config_dir }}/{{ item }}"
@@ -86,7 +105,7 @@
- name: Create local temp directory for syncing certs
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_master_mktemp
+ register: g_master_certs_mktemp
changed_when: False
when: master_certs_missing | bool
delegate_to: localhost
@@ -104,7 +123,7 @@
- name: Retrieve the master cert tarball from the master
fetch:
src: "{{ openshift_master_generated_config_dir }}.tgz"
- dest: "{{ g_master_mktemp.stdout }}/"
+ dest: "{{ g_master_certs_mktemp.stdout }}/"
flat: yes
fail_on_missing: yes
validate_checksum: yes
@@ -119,11 +138,11 @@
- name: Unarchive the tarball on the master
unarchive:
- src: "{{ g_master_mktemp.stdout }}/{{ openshift_master_cert_subdir }}.tgz"
+ src: "{{ g_master_certs_mktemp.stdout }}/{{ openshift_master_cert_subdir }}.tgz"
dest: "{{ openshift_master_config_dir }}"
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
-- file: name={{ g_master_mktemp.stdout }} state=absent
+- file: name={{ g_master_certs_mktemp.stdout }} state=absent
changed_when: False
when: master_certs_missing | bool
delegate_to: localhost
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index f4c47c7bb..a61b0db5e 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -5,10 +5,14 @@ OpenShift Metrics Installation
Requirements
------------
+This role has the following dependencies:
+
+- Java is required on the control node to generate keystores for the Java components
+- httpd-tools is required on the control node to generate various passwords for the metrics components
The following variables need to be set and will be validated:
-- `openshift_metrics_hostname`: hostname used on the hawkular metrics route.
+- `openshift_metrics_hawkular_hostname`: hostname used on the hawkular metrics route.
- `openshift_metrics_project`: project (i.e. namespace) where the components will be
deployed.
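(On an RPM-based control node, both dependencies can be put in place ahead of time. A minimal local play as a sketch; the package names java-1.8.0-openjdk-headless and httpd-tools follow the failure messages added to install_support.yaml later in this change:)

- hosts: localhost
  connection: local
  become: yes
  tasks:
  - name: Install keystore and password-generation tools
    package: name={{ item }} state=present
    with_items:
    - java-1.8.0-openjdk-headless
    - httpd-tools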
diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh
index bb046df87..f4315ef34 100755
--- a/roles/openshift_metrics/files/import_jks_certs.sh
+++ b/roles/openshift_metrics/files/import_jks_certs.sh
@@ -114,5 +114,3 @@ function import_certs() {
}
import_certs
-
-exit 0
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
index f6bf6c1a6..f5192b005 100644
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml
@@ -1,76 +1,4 @@
---
-- name: Check for jks-generator service account
- command: >
- {{ openshift.common.client_binary }}
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n {{openshift_metrics_project}}
- get serviceaccount/jks-generator --no-headers
- register: serviceaccount_result
- ignore_errors: yes
- when: not ansible_check_mode
- changed_when: no
-
-- name: Create jks-generator service account
- command: >
- {{ openshift.common.client_binary }}
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n {{openshift_metrics_project}}
- create serviceaccount jks-generator
- when: not ansible_check_mode and "not found" in serviceaccount_result.stderr
-
-- name: Check for hostmount-anyuid scc entry
- command: >
- {{ openshift.common.client_binary }}
- --config={{ mktemp.stdout }}/admin.kubeconfig
- get scc hostmount-anyuid
- -o jsonpath='{.users}'
- register: scc_result
- when: not ansible_check_mode
- changed_when: no
-
-- name: Add to hostmount-anyuid scc
- command: >
- {{ openshift.common.admin_binary }}
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n {{openshift_metrics_project}}
- policy add-scc-to-user hostmount-anyuid
- -z jks-generator
- when:
- - not ansible_check_mode
- - scc_result.stdout.find("system:serviceaccount:{{openshift_metrics_project}}:jks-generator") == -1
-
-- name: Copy JKS generation script
- copy:
- src: import_jks_certs.sh
- dest: "{{openshift_metrics_certs_dir}}/import_jks_certs.sh"
- check_mode: no
-
-- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd
- register: metrics_keystore_password
-
-- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd
- register: cassandra_keystore_password
-
-- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd
- register: jgroups_keystore_password
-
-- name: Generate JKS pod template
- template:
- src: jks_pod.j2
- dest: "{{mktemp.stdout}}/jks_pod.yaml"
- vars:
- metrics_keystore_passwd: "{{metrics_keystore_password.content}}"
- cassandra_keystore_passwd: "{{cassandra_keystore_password.content}}"
- metrics_truststore_passwd: "{{hawkular_truststore_password.content}}"
- cassandra_truststore_passwd: "{{cassandra_truststore_password.content}}"
- jgroups_passwd: "{{jgroups_keystore_password.content}}"
- check_mode: no
- changed_when: no
-
-- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore"
- register: metrics_keystore
- check_mode: no
-
- stat: path="{{openshift_metrics_certs_dir}}/hawkular-cassandra.keystore"
register: cassandra_keystore
check_mode: no
@@ -79,6 +7,10 @@
register: cassandra_truststore
check_mode: no
+- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore"
+ register: metrics_keystore
+ check_mode: no
+
- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.truststore"
register: metrics_truststore
check_mode: no
@@ -87,32 +19,52 @@
register: jgroups_keystore
check_mode: no
-- name: create JKS pod
- command: >
- {{ openshift.common.client_binary }}
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n {{openshift_metrics_project}}
- create -f {{mktemp.stdout}}/jks_pod.yaml
- -o name
- register: podoutput
- check_mode: no
- when: not metrics_keystore.stat.exists or
- not metrics_truststore.stat.exists or
- not cassandra_keystore.stat.exists or
- not cassandra_truststore.stat.exists or
- not jgroups_keystore.stat.exists
+- block:
+ - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd
+ register: metrics_keystore_password
+
+ - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd
+ register: cassandra_keystore_password
+
+ - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd
+ register: jgroups_keystore_password
+
+ - local_action: command mktemp -d
+ register: local_tmp
+ changed_when: False
+
+ - fetch:
+ dest: "{{local_tmp.stdout}}/"
+ src: "{{ openshift_metrics_certs_dir }}/{{item}}"
+ flat: yes
+ changed_when: False
+ with_items:
+ - hawkular-metrics.pkcs12
+ - hawkular-cassandra.pkcs12
+ - hawkular-metrics.crt
+ - hawkular-cassandra.crt
+ - ca.crt
+
+ - local_action: command {{role_path}}/files/import_jks_certs.sh
+ environment:
+ CERT_DIR: "{{local_tmp.stdout}}"
+ METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}"
+ CASSANDRA_KEYSTORE_PASSWD: "{{cassandra_keystore_password.content}}"
+ METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}"
+ CASSANDRA_TRUSTSTORE_PASSWD: "{{cassandra_truststore_password.content}}"
+ JGROUPS_PASSWD: "{{jgroups_keystore_password.content}}"
+ changed_when: False
+
+ - copy:
+ dest: "{{openshift_metrics_certs_dir}}/"
+ src: "{{item}}"
+ with_fileglob: "{{local_tmp.stdout}}/*.*store"
+
+ - file:
+ path: "{{local_tmp.stdout}}"
+ state: absent
+ changed_when: False
-- command: >
- {{ openshift.common.client_binary }}
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n {{openshift_metrics_project}}
- get {{podoutput.stdout}}
- -o jsonpath='{.status.phase}'
- register: result
- until: result.stdout.find("Succeeded") != -1
- retries: 5
- delay: 10
- changed_when: no
when: not metrics_keystore.stat.exists or
not metrics_truststore.stat.exists or
not cassandra_keystore.stat.exists or
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index bab37dbfb..ddaa54438 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -20,15 +20,23 @@
loop_control:
loop_var: include_file
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+ register: object_def_files
+ changed_when: no
+
+- slurp: src={{item.path}}
+ register: object_defs
+ with_items: "{{object_def_files.files}}"
+ changed_when: no
+
- name: Create objects
include: oc_apply.yaml
vars:
kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
namespace: "{{ openshift_metrics_project }}"
- file_name: "{{ item }}"
- file_content: "{{ lookup('file',item) | from_yaml }}"
- with_fileglob:
- - "{{ mktemp.stdout }}/templates/*.yaml"
+ file_name: "{{ item.source }}"
+ file_content: "{{ item.content | b64decode | from_yaml }}"
+ with_items: "{{ object_defs.results }}"
- name: Scaling up cluster
include: start_metrics.yaml
diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml
index b0e4bec80..cc5acc6e5 100644
--- a/roles/openshift_metrics/tasks/install_support.yaml
+++ b/roles/openshift_metrics/tasks/install_support.yaml
@@ -1,4 +1,22 @@
---
+- name: Check control node to see if htpasswd is installed
+ local_action: command which htpasswd
+ register: htpasswd_check
+ failed_when: no
+ changed_when: no
+
+- fail: msg="'htpasswd' is unavailable. Please install httpd-tools on the control node"
+ when: htpasswd_check.rc == 1
+
+- name: Check control node to see if keytool is installed
+ local_action: command which keytool
+ register: keytool_check
+ failed_when: no
+ changed_when: no
+
+- fail: msg="'keytool' is unavailable. Please install java-1.8.0-openjdk-headless on the control node"
+ when: keytool_check.rc == 1
+
- include: generate_certificates.yaml
- include: generate_serviceaccounts.yaml
- include: generate_services.yaml
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index c42440130..1808db5d5 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -7,6 +7,7 @@
- name: Create temp directory for all our templates
file: path={{mktemp.stdout}}/templates state=directory mode=0755
changed_when: False
+ when: "{{ openshift_metrics_install_metrics | bool }}"
- name: Copy the admin client config(s)
command: >
@@ -15,8 +16,4 @@
check_mode: no
tags: metrics_init
-- include: install_metrics.yaml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- include: uninstall_metrics.yaml
- when: not openshift_metrics_install_metrics | default(false) | bool
+- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}"
diff --git a/roles/openshift_metrics/templates/jks_pod.j2 b/roles/openshift_metrics/templates/jks_pod.j2
deleted file mode 100644
index e86fe38a4..000000000
--- a/roles/openshift_metrics/templates/jks_pod.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- labels:
- metrics-infra: support
- generateName: jks-cert-gen-
-spec:
- containers:
- - name: jks-cert-gen
- image: {{openshift_metrics_image_prefix}}metrics-deployer:{{openshift_metrics_image_version}}
- imagePullPolicy: Always
- command: ["sh", "{{openshift_metrics_certs_dir}}/import_jks_certs.sh"]
- securityContext:
- runAsUser: 0
- volumeMounts:
- - mountPath: {{openshift_metrics_certs_dir}}
- name: certmount
- env:
- - name: CERT_DIR
- value: {{openshift_metrics_certs_dir}}
- - name: METRICS_KEYSTORE_PASSWD
- value: {{metrics_keystore_passwd}}
- - name: CASSANDRA_KEYSTORE_PASSWD
- value: {{cassandra_keystore_passwd}}
- - name: METRICS_TRUSTSTORE_PASSWD
- value: {{metrics_truststore_passwd}}
- - name: CASSANDRA_TRUSTSTORE_PASSWD
- value: {{cassandra_truststore_passwd}}
- - name: hawkular_cassandra_alias
- value: {{cassandra_keystore_passwd}}
- - name: JGROUPS_PASSWD
- value: {{jgroups_passwd}}
- restartPolicy: Never
- serviceAccount: jks-generator
- volumes:
- - hostPath:
- path: "{{openshift_metrics_certs_dir}}"
- name: certmount
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 91f118191..10036abed 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -17,8 +17,6 @@ dependencies:
- role: openshift_docker
- role: openshift_node_certificates
- role: openshift_cloud_provider
-- role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- role: os_firewall
os_firewall_allow:
- service: Kubernetes kubelet
@@ -43,3 +41,5 @@ dependencies:
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
when: openshift_node_port_range is defined
+- role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq | bool
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 3b5865a50..e33d5d497 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 717bf3cea..a263f4f3a 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -49,32 +49,38 @@
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
--certificate-authority={{ openshift_ca_cert }}
- --client-dir={{ openshift_node_generated_config_dir }}
+ --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}
--groups=system:nodes
--master={{ hostvars[openshift_ca_host].openshift.master.api_url }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
- --user=system:node:{{ openshift.common.hostname }}
+ --user=system:node:{{ hostvars[item].openshift.common.hostname }}
args:
- creates: "{{ openshift_node_generated_config_dir }}"
- when: node_certs_missing | bool
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
- name: Generate the node server certificate
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
- --cert={{ openshift_node_generated_config_dir }}/server.crt
- --key={{ openshift_generated_configs_dir }}/node-{{ openshift.common.hostname }}/server.key
+ --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
+ --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
--overwrite=true
- --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }},{{ openshift.common.ip }},{{ openshift.common.public_ip }}
+ --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
args:
- creates: "{{ openshift_node_generated_config_dir }}/server.crt"
- when: node_certs_missing | bool
- delegate_to: "{{ openshift_ca_host}}"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt"
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
- name: Create local temp directory for syncing certs
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 8ba650994..8d4878fa7 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -223,7 +223,9 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def gen_cmd(self):
cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
- return ["/usr/sbin/%s" % cmd]
+ # Include -w (wait for xtables lock) in default arguments.
+ default_args = ['-w']
+ return ["/usr/sbin/%s" % cmd] + default_args
def gen_save_cmd(self): # pylint: disable=no-self-use
return ['/usr/libexec/iptables/iptables.init', 'save']
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 1101870be..c4db197ca 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -1,7 +1,8 @@
---
- name: Install firewalld packages
- package: name=firewalld state=present
- when: not openshift.common.is_containerized | bool
+ package:
+ name: firewalld
+ state: present
- name: Ensure iptables services are not enabled
systemd:
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index ba3b9a923..28c3c7080 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -27,6 +27,11 @@
creates: /etc/rhsm/ca/katello-server-ca.pem
when: rhel_subscription_server is defined and rhel_subscription_server
+- name: Install Red Hat Subscription Manager
+ yum:
+ name: subscription-manager
+ state: present
+
- name: RedHat subscriptions
redhat_subscription:
username: "{{ rhel_subscription_user }}"