diff options
Diffstat (limited to 'roles')
173 files changed, 6210 insertions, 699 deletions
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml index 0b3a2a69d..ce2ae8365 100644 --- a/roles/ansible_service_broker/vars/openshift-enterprise.yml +++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml @@ -1,7 +1,7 @@  ---  __ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose- -__ansible_service_broker_image_tag: latest +__ansible_service_broker_image_tag: v3.6  __ansible_service_broker_etcd_image_prefix: rhel7/  __ansible_service_broker_etcd_image_tag: latest diff --git a/roles/cockpit/defaults/main.yml b/roles/cockpit/defaults/main.yml index cbe5bb92b..15c40e3b5 100644 --- a/roles/cockpit/defaults/main.yml +++ b/roles/cockpit/defaults/main.yml @@ -1,6 +1,6 @@  --- -r_cockpit_firewall_enabled: True -r_cockpit_use_firewalld: False +r_cockpit_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_cockpit_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  r_cockpit_os_firewall_deny: []  r_cockpit_os_firewall_allow: diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 1f9ac5059..78c6671d8 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -7,8 +7,8 @@  - set_fact:      l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}" -    l_use_crio: "{{ openshift.docker.use_crio | default(False) }}" -    l_use_crio_only: "{{ openshift.docker.use_crio_only | default(False) }}" +    l_use_crio: "{{ openshift_use_crio | default(False) }}" +    l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"  - name: Use Package Docker if Requested    include: package_docker.yml diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2 index eae1759ab..5b31932b1 100644 --- a/roles/docker/templates/crio.conf.j2 +++ b/roles/docker/templates/crio.conf.j2 @@ -43,7 +43,7 @@ stream_port = "10010"  # This is a mandatory setting as 
this runtime will be the default one  # and will also be used for untrusted container workloads if  # runtime_untrusted_workload is not set. -runtime = "/usr/libexec/crio/runc" +runtime = "/usr/bin/runc"  # runtime_untrusted_workload is the OCI compatible runtime used for untrusted  # container workloads. This is an optional setting, except if diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py new file mode 100644 index 000000000..231857cca --- /dev/null +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -0,0 +1,1649 @@ +#!/usr/bin/env python +# pylint: disable=missing-docstring +# flake8: noqa: T001 +#     ___ ___ _  _ ___ ___    _ _____ ___ ___ +#    / __| __| \| | __| _ \  /_\_   _| __|   \ +#   | (_ | _|| .` | _||   / / _ \| | | _|| |) | +#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ +#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _| +#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | | +#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_| +# +# Copyright 2016 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- +''' +   OpenShiftCLI class that wraps the oc commands in a subprocess +''' +# pylint: disable=too-many-lines + +from __future__ import print_function +import atexit +import copy +import json +import os +import re +import shutil +import subprocess +import tempfile +# pylint: disable=import-error +try: +    import ruamel.yaml as yaml +except ImportError: +    import yaml + +from ansible.module_utils.basic import AnsibleModule + +# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: doc/csr -*- -*- -*- + +DOCUMENTATION = ''' +--- +module: oc_adm_csr +short_description: Module to approve or deny openshift certificate signing requests +description: +  - Wrapper around the openshift `oc adm certificate approve|deny <csr>` command. +options: +  state: +    description: +    - approve|deny|list Approve, deny, and list are the only supported states for certificates +    required: false +    default: present +    choices: +    - present +    aliases: [] +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +  debug: +    description: +    - Turn on debug output. +    required: false +    default: False +    aliases: [] +  nodes: +    description: +    - A list of the names of the nodes in which to accept the certificates +    required: false +    default: None +    aliases: [] +  timeout: +    description: +    - This flag allows for a timeout value when approving nodes. +    required: false +    default: 30 +    aliases: [] +  timeout: +    description: +    - This flag allows for a timeout value when doing node approvals. 
+    - A zero value for the timeout will block until the nodes have been accepted +    required: false +    default: 30 +    aliases: [] +  approve_all: +    description: +    - This flag allows for the module to approve all CSRs that are found. +    - This facilitates testing. +    required: false +    default: False +    aliases: [] +  service_account: +    description: +    - This parameter tells the approval process which service account is being used for the requests +    required: false +    default: node-bootstrapper +    aliases: [] +author: +- "Kenny Woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: Approve certificates for node xyz +  oc_adm_scr: +    nodes: +    - xyz +    timeout: 300 + +- name: Approve certificates for node xyz +  oc_adm_scr: +    nodes: +    - xyz +    timeout: 0 +''' + +# -*- -*- -*- End included fragment: doc/csr -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- + + +class YeditException(Exception):  # pragma: no cover +    ''' Exception class for Yedit ''' +    pass + + +# pylint: disable=too-many-public-methods +class Yedit(object):  # pragma: no cover +    ''' Class to modify yaml files ''' +    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" +    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" +    com_sep = set(['.', '#', '|', ':']) + +    # pylint: disable=too-many-arguments +    def __init__(self, +                 filename=None, +                 content=None, +                 content_type='yaml', +                 separator='.', +                 backup=False): +        self.content = content +        self._separator = separator +        self.filename = filename +        self.__yaml_dict = content +        self.content_type = content_type +        self.backup = backup +        self.load(content_type=self.content_type) +        if self.__yaml_dict is None: +            self.__yaml_dict = {} + +    @property +    def 
separator(self): +        ''' getter method for separator ''' +        return self._separator + +    @separator.setter +    def separator(self, inc_sep): +        ''' setter method for separator ''' +        self._separator = inc_sep + +    @property +    def yaml_dict(self): +        ''' getter method for yaml_dict ''' +        return self.__yaml_dict + +    @yaml_dict.setter +    def yaml_dict(self, value): +        ''' setter method for yaml_dict ''' +        self.__yaml_dict = value + +    @staticmethod +    def parse_key(key, sep='.'): +        '''parse the key allowing the appropriate separator''' +        common_separators = list(Yedit.com_sep - set([sep])) +        return re.findall(Yedit.re_key.format(''.join(common_separators)), key) + +    @staticmethod +    def valid_key(key, sep='.'): +        '''validate the incoming key''' +        common_separators = list(Yedit.com_sep - set([sep])) +        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key): +            return False + +        return True + +    @staticmethod +    def remove_entry(data, key, sep='.'): +        ''' remove data at location key ''' +        if key == '' and isinstance(data, dict): +            data.clear() +            return True +        elif key == '' and isinstance(data, list): +            del data[:] +            return True + +        if not (key and Yedit.valid_key(key, sep)) and \ +           isinstance(data, (list, dict)): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        # process last index for remove +        # expected list entry +        if key_indexes[-1][0]: +      
      if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +                del data[int(key_indexes[-1][0])] +                return True + +        # expected dict entry +        elif key_indexes[-1][1]: +            if isinstance(data, dict): +                del data[key_indexes[-1][1]] +                return True + +    @staticmethod +    def add_entry(data, key, item=None, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a#b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key: +                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501 +                    data = data[dict_key] +                    continue + +                elif data and not isinstance(data, dict): +                    raise YeditException("Unexpected item type found while going through key " + +                                         "path: {} (at key: {})".format(key, dict_key)) + +                data[dict_key] = {} +                data = data[dict_key] + +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                raise YeditException("Unexpected item type found while going through key path: {}".format(key)) + +        if key == '': +            data = item + +        # process last index for add +        # expected list entry +        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +            data[int(key_indexes[-1][0])] = item + +        # expected dict entry +   
     elif key_indexes[-1][1] and isinstance(data, dict): +            data[key_indexes[-1][1]] = item + +        # didn't add/update to an existing list, nor add/update key to a dict +        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a +        # non-existent array +        else: +            raise YeditException("Error adding to object at path: {}".format(key)) + +        return data + +    @staticmethod +    def get_entry(data, key, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a.b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        return data + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. 
''' + +        tmp_filename = filename + '.yedit' + +        with open(tmp_filename, 'w') as yfd: +            yfd.write(contents) + +        os.rename(tmp_filename, filename) + +    def write(self): +        ''' write to file ''' +        if not self.filename: +            raise YeditException('Please specify a filename.') + +        if self.backup and self.file_exists(): +            shutil.copy(self.filename, self.filename + '.orig') + +        # Try to set format attributes if supported +        try: +            self.yaml_dict.fa.set_block_style() +        except AttributeError: +            pass + +        # Try to use RoundTripDumper if supported. +        try: +            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) +        except AttributeError: +            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) + +        return (True, self.yaml_dict) + +    def read(self): +        ''' read from file ''' +        # check if it exists +        if self.filename is None or not self.file_exists(): +            return None + +        contents = None +        with open(self.filename) as yfd: +            contents = yfd.read() + +        return contents + +    def file_exists(self): +        ''' return whether file exists ''' +        if os.path.exists(self.filename): +            return True + +        return False + +    def load(self, content_type='yaml'): +        ''' return yaml file ''' +        contents = self.read() + +        if not contents and not self.content: +            return None + +        if self.content: +            if isinstance(self.content, dict): +                self.yaml_dict = self.content +                return self.yaml_dict +            elif isinstance(self.content, str): +                contents = self.content + +        # check if it is yaml +        try: +            if content_type == 'yaml' and contents: +                # Try to set format attributes 
if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +                # Try to use RoundTripLoader if supported. +                try: +                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader) +                except AttributeError: +                    self.yaml_dict = yaml.safe_load(contents) + +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +            elif content_type == 'json' and contents: +                self.yaml_dict = json.loads(contents) +        except yaml.YAMLError as err: +            # Error loading yaml or json +            raise YeditException('Problem with loading yaml file. {}'.format(err)) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            
ind = None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # AUDIT:maybe-no-member makes 
sense due to loading data from +        # a serialized format. +        # pylint: disable=maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' + +                                     'value=[{}] type=[{}]'.format(value, type(value))) + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put 
path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        # Try to use ruamel.yaml and fallback to pyyaml +        try: +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                      default_flow_style=False), +                                 yaml.RoundTripLoader) +        except AttributeError: +            tmp_copy = copy.deepcopy(self.yaml_dict) + +        # set the format attributes if available +        try: +            tmp_copy.fa.set_block_style() +        except AttributeError: +            pass + +        result = Yedit.add_entry(tmp_copy, path, value, self.separator) +        if result is None: +            return (False, self.yaml_dict) + +        # When path equals "" it is a special case. 
+        # "" refers to the root of the document +        # Only update the root path (entire document) when its a list or dict +        if path == '': +            if isinstance(result, list) or isinstance(result, dict): +                self.yaml_dict = result +                return (True, self.yaml_dict) + +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            # Try to use ruamel.yaml and fallback to pyyaml +            try: +                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                          default_flow_style=False), +                                     yaml.RoundTripLoader) +            except AttributeError: +                tmp_copy = copy.deepcopy(self.yaml_dict) + +            # set the format attributes if available +            try: +                tmp_copy.fa.set_block_style() +            except AttributeError: +                pass + +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result is not None: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod +    def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 
'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # There is a special case where '' will turn into None after yaml loading it so skip +        if isinstance(inc_value, str) and inc_value == '': +            pass +        # If vtype is not str then go ahead and attempt to yaml load it. +        elif isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.safe_load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming value. 
' + +                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype)) + +        return inc_value + +    @staticmethod +    def process_edits(edits, yamlfile): +        '''run through a list of edits and process them one-by-one''' +        results = [] +        for edit in edits: +            value = Yedit.parse_value(edit['value'], edit.get('value_type', '')) +            if edit.get('action') == 'update': +                # pylint: disable=line-too-long +                curr_value = Yedit.get_curr_value( +                    Yedit.parse_value(edit.get('curr_value')), +                    edit.get('curr_value_format')) + +                rval = yamlfile.update(edit['key'], +                                       value, +                                       edit.get('index'), +                                       curr_value) + +            elif edit.get('action') == 'append': +                rval = yamlfile.append(edit['key'], value) + +            else: +                rval = yamlfile.put(edit['key'], value) + +            if rval[0]: +                results.append({'key': edit['key'], 'edit': rval[1]}) + +        return {'changed': len(results) > 0, 'results': results} + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(params): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=params['src'], +                         backup=params['backup'], +                         separator=params['separator']) + +        state = params['state'] + +        if params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and state != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [{}].  
Verify that the '.format(params['src']) + +                               'file exists, that it is has correct permissions, and is valid yaml.'} + +        if state == 'list': +            if params['content']: +                content = Yedit.parse_value(params['content'], params['content_type']) +                yamlfile.yaml_dict = content + +            if params['key']: +                rval = yamlfile.get(params['key']) or {} + +            return {'changed': False, 'result': rval, 'state': state} + +        elif state == 'absent': +            if params['content']: +                content = Yedit.parse_value(params['content'], params['content_type']) +                yamlfile.yaml_dict = content + +            if params['update']: +                rval = yamlfile.pop(params['key'], params['value']) +            else: +                rval = yamlfile.delete(params['key']) + +            if rval[0] and params['src']: +                yamlfile.write() + +            return {'changed': rval[0], 'result': rval[1], 'state': state} + +        elif state == 'present': +            # check if content is different than what is in the file +            if params['content']: +                content = Yedit.parse_value(params['content'], params['content_type']) + +                # We had no edits to make and the contents are the same +                if yamlfile.yaml_dict == content and \ +                   params['value'] is None: +                    return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} + +                yamlfile.yaml_dict = content + +            # If we were passed a key, value then +            # we enapsulate it in a list and process it +            # Key, Value passed to the module : Converted to Edits list # +            edits = [] +            _edit = {} +            if params['value'] is not None: +                _edit['value'] = params['value'] +                _edit['value_type'] = params['value_type'] +                
_edit['key'] = params['key'] + +                if params['update']: +                    _edit['action'] = 'update' +                    _edit['curr_value'] = params['curr_value'] +                    _edit['curr_value_format'] = params['curr_value_format'] +                    _edit['index'] = params['index'] + +                elif params['append']: +                    _edit['action'] = 'append' + +                edits.append(_edit) + +            elif params['edits'] is not None: +                edits = params['edits'] + +            if edits: +                results = Yedit.process_edits(edits, yamlfile) + +                # if there were changes and a src provided to us we need to write +                if results['changed'] and params['src']: +                    yamlfile.write() + +                return {'changed': results['changed'], 'result': results['results'], 'state': state} + +            # no edits to make +            if params['src']: +                # pylint: disable=redefined-variable-type +                rval = yamlfile.write() +                return {'changed': rval[0], +                        'result': rval[1], +                        'state': state} + +            # We were passed content but no src, key or value, or edits.  
Return contents in memory +            return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} +        return {'failed': True, 'msg': 'Unkown state passed'} + +# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- +# pylint: disable=too-many-lines +# noqa: E301,E302,E303,T001 + + +class OpenShiftCLIError(Exception): +    '''Exception class for openshiftcli''' +    pass + + +ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] + + +def locate_oc_binary(): +    ''' Find and return oc binary file ''' +    # https://github.com/openshift/openshift-ansible/issues/3410 +    # oc can be in /usr/local/bin in some cases, but that may not +    # be in $PATH due to ansible/sudo +    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS + +    oc_binary = 'oc' + +    # Use shutil.which if it is available, otherwise fallback to a naive path search +    try: +        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) +        if which_result is not None: +            oc_binary = which_result +    except AttributeError: +        for path in paths: +            if os.path.exists(os.path.join(path, oc_binary)): +                oc_binary = os.path.join(path, oc_binary) +                break + +    return oc_binary + + +# pylint: disable=too-few-public-methods +class OpenShiftCLI(object): +    ''' Class to wrap the command line tools ''' +    def __init__(self, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False, +                 all_namespaces=False): +        ''' Constructor for OpenshiftCLI ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) +        self.all_namespaces = all_namespaces +        self.oc_binary = locate_oc_binary() + +    # 
Pylint allows only 5 arguments to be passed. +    # pylint: disable=too-many-arguments +    def _replace_content(self, resource, rname, content, force=False, sep='.'): +        ''' replace the current object with the content ''' +        res = self._get(resource, rname) +        if not res['results']: +            return res + +        fname = Utils.create_tmpfile(rname + '-') + +        yed = Yedit(fname, res['results'][0], separator=sep) +        changes = [] +        for key, value in content.items(): +            changes.append(yed.put(key, value)) + +        if any([change[0] for change in changes]): +            yed.write() + +            atexit.register(Utils.cleanup, [fname]) + +            return self._replace(fname, force) + +        return {'returncode': 0, 'updated': False} + +    def _replace(self, fname, force=False): +        '''replace the current object with oc replace''' +        # We are removing the 'resourceVersion' to handle +        # a race condition when modifying oc objects +        yed = Yedit(fname) +        results = yed.delete('metadata.resourceVersion') +        if results[0]: +            yed.write() + +        cmd = ['replace', '-f', fname] +        if force: +            cmd.append('--force') +        return self.openshift_cmd(cmd) + +    def _create_from_content(self, rname, content): +        '''create a temporary file and then call oc create on it''' +        fname = Utils.create_tmpfile(rname + '-') +        yed = Yedit(fname, content=content) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self._create(fname) + +    def _create(self, fname): +        '''call oc create on a filename''' +        return self.openshift_cmd(['create', '-f', fname]) + +    def _delete(self, resource, name=None, selector=None): +        '''call oc delete on a resource''' +        cmd = ['delete', resource] +        if selector is not None: +            cmd.append('--selector={}'.format(selector)) +        
elif name is not None: +            cmd.append(name) +        else: +            raise OpenShiftCLIError('Either name or selector is required when calling delete.') + +        return self.openshift_cmd(cmd) + +    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501 +        '''process a template + +           template_name: the name of the template to process +           create: whether to send to oc create after processing +           params: the parameters for the template +           template_data: the incoming template's data; instead of a file +        ''' +        cmd = ['process'] +        if template_data: +            cmd.extend(['-f', '-']) +        else: +            cmd.append(template_name) +        if params: +            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()] +            cmd.append('-v') +            cmd.extend(param_str) + +        results = self.openshift_cmd(cmd, output=True, input_data=template_data) + +        if results['returncode'] != 0 or not create: +            return results + +        fname = Utils.create_tmpfile(template_name + '-') +        yed = Yedit(fname, results['results']) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self.openshift_cmd(['create', '-f', fname]) + +    def _get(self, resource, name=None, selector=None): +        '''return a resource by name ''' +        cmd = ['get', resource] +        if selector is not None: +            cmd.append('--selector={}'.format(selector)) +        elif name is not None: +            cmd.append(name) + +        cmd.extend(['-o', 'json']) + +        rval = self.openshift_cmd(cmd, output=True) + +        # Ensure results are retuned in an array +        if 'items' in rval: +            rval['results'] = rval['items'] +        elif not isinstance(rval['results'], list): +            rval['results'] = [rval['results']] + +        return rval + + 
   def _schedulable(self, node=None, selector=None, schedulable=True): +        ''' perform oadm manage-node scheduable ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector={}'.format(selector)) + +        cmd.append('--schedulable={}'.format(schedulable)) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501 + +    def _list_pods(self, node=None, selector=None, pod_selector=None): +        ''' perform oadm list pods + +            node: the node in which to list pods +            selector: the label selector filter if provided +            pod_selector: the pod selector filter if provided +        ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector={}'.format(selector)) + +        if pod_selector: +            cmd.append('--pod-selector={}'.format(pod_selector)) + +        cmd.extend(['--list-pods', '-o', 'json']) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    # pylint: disable=too-many-arguments +    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): +        ''' perform oadm manage-node evacuate ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector={}'.format(selector)) + +        if dry_run: +            cmd.append('--dry-run') + +        if pod_selector: +            cmd.append('--pod-selector={}'.format(pod_selector)) + +        if grace_period: +            cmd.append('--grace-period={}'.format(int(grace_period))) + +        if force: +            cmd.append('--force') + +        cmd.append('--evacuate') + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    def _version(self): +        ''' return the openshift version''' +        
return self.openshift_cmd(['version'], output=True, output_type='raw') + +    def _import_image(self, url=None, name=None, tag=None): +        ''' perform image import ''' +        cmd = ['import-image'] + +        image = '{0}'.format(name) +        if tag: +            image += ':{0}'.format(tag) + +        cmd.append(image) + +        if url: +            cmd.append('--from={0}/{1}'.format(url, image)) + +        cmd.append('-n{0}'.format(self.namespace)) + +        cmd.append('--confirm') +        return self.openshift_cmd(cmd) + +    def _run(self, cmds, input_data): +        ''' Actually executes the command. This makes mocking easier. ''' +        curr_env = os.environ.copy() +        curr_env.update({'KUBECONFIG': self.kubeconfig}) +        proc = subprocess.Popen(cmds, +                                stdin=subprocess.PIPE, +                                stdout=subprocess.PIPE, +                                stderr=subprocess.PIPE, +                                env=curr_env) + +        stdout, stderr = proc.communicate(input_data) + +        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') + +    # pylint: disable=too-many-arguments,too-many-branches +    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): +        '''Base command for oc ''' +        cmds = [self.oc_binary] + +        if oadm: +            cmds.append('adm') + +        cmds.extend(cmd) + +        if self.all_namespaces: +            cmds.extend(['--all-namespaces']) +        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501 +            cmds.extend(['-n', self.namespace]) + +        if self.verbose: +            print(' '.join(cmds)) + +        try: +            returncode, stdout, stderr = self._run(cmds, input_data) +        except OSError as ex: +            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) + +        
rval = {"returncode": returncode, +                "cmd": ' '.join(cmds)} + +        if output_type == 'json': +            rval['results'] = {} +            if output and stdout: +                try: +                    rval['results'] = json.loads(stdout) +                except ValueError as verr: +                    if "No JSON object could be decoded" in verr.args: +                        rval['err'] = verr.args +        elif output_type == 'raw': +            rval['results'] = stdout if output else '' + +        if self.verbose: +            print("STDOUT: {0}".format(stdout)) +            print("STDERR: {0}".format(stderr)) + +        if 'err' in rval or returncode != 0: +            rval.update({"stderr": stderr, +                         "stdout": stdout}) + +        return rval + + +class Utils(object):  # pragma: no cover +    ''' utilities for openshiftcli modules ''' + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. 
''' + +        with open(filename, 'w') as sfd: +            sfd.write(contents) + +    @staticmethod +    def create_tmp_file_from_contents(rname, data, ftype='yaml'): +        ''' create a file in tmp with name and contents''' + +        tmp = Utils.create_tmpfile(prefix=rname) + +        if ftype == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripDumper'): +                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) +            else: +                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) + +        elif ftype == 'json': +            Utils._write(tmp, json.dumps(data)) +        else: +            Utils._write(tmp, data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [tmp]) +        return tmp + +    @staticmethod +    def create_tmpfile_copy(inc_file): +        '''create a temporary copy of a file''' +        tmpfile = Utils.create_tmpfile('lib_openshift-') +        Utils._write(tmpfile, open(inc_file).read()) + +        # Cleanup the tmpfile +        atexit.register(Utils.cleanup, [tmpfile]) + +        return tmpfile + +    @staticmethod +    def create_tmpfile(prefix='tmp'): +        ''' Generates and returns a temporary file name ''' + +        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: +            return tmp.name + +    @staticmethod +    def create_tmp_files_from_contents(content, content_type=None): +        '''Turn an array of dict: filename, content into a files array''' +        if not isinstance(content, list): +            content = [content] +        files = [] +        for item in content: +            path = Utils.create_tmp_file_from_contents(item['path'] + '-', +                                                       item['data'], +                                                       ftype=content_type) +      
      files.append({'name': os.path.basename(item['path']), +                          'path': path}) +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if 'metadata' in result and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripLoader'): +                contents = yaml.load(contents, yaml.RoundTripLoader) +            else: +                contents = yaml.safe_load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    @staticmethod +    def filter_versions(stdout): +        ''' filter the oc version output ''' + +        version_dict = {} +        version_search = ['oc', 'openshift', 'kubernetes'] + +        for line in stdout.strip().split('\n'): +            for term in version_search: +                if not line: 
+                    continue +                if line.startswith(term): +                    version_dict[term] = line.split()[-1] + +        # horrible hack to get openshift version in Openshift 3.2 +        #  By default "oc version in 3.2 does not return an "openshift" version +        if "openshift" not in version_dict: +            version_dict["openshift"] = version_dict["oc"] + +        return version_dict + +    @staticmethod +    def add_custom_versions(versions): +        ''' create custom versions strings ''' + +        versions_dict = {} + +        for tech, version in versions.items(): +            # clean up "-" from version +            if "-" in version: +                version = version.split("-")[0] + +            if version.startswith('v'): +                versions_dict[tech + '_numeric'] = version[1:].split('+')[0] +                # "v3.3.0.33" is what we have, we want "3.3" +                versions_dict[tech + '_short'] = version[1:4] + +        return versions_dict + +    @staticmethod +    def openshift_installed(): +        ''' check if openshift is installed ''' +        import rpm + +        transaction_set = rpm.TransactionSet() +        rpmquery = transaction_set.dbMatch("name", "atomic-openshift") + +        return rpmquery.count() > 0 + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements +    @staticmethod +    def check_def_equal(user_def, result_def, skip_keys=None, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  
''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['metadata', 'status'] +        if skip_keys: +            skip.extend(skip_keys) + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if key not in user_def: +                    if debug: +                        print('User data does not have key [%s]' % key) +                        print('User data: %s' % user_def) +                    return False + +                if not isinstance(user_def[key], list): +                    if debug: +                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) +                    return False + +                if len(user_def[key]) != len(value): +                    if debug: +                        print("List lengths are not equal.") +                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) +                        print("user_def: %s" % user_def[key]) +                        print("value: %s" % value) +                    return False + +                for values in zip(user_def[key], value): +                    if isinstance(values[0], dict) and isinstance(values[1], dict): +                        if debug: +                            print('sending list - list') +                            print(type(values[0])) +                            print(type(values[1])) +                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) +                        if not result: +                            print('list compare returned false') +                            return False + +                    elif value != user_def[key]: +                        if debug: +                            print('value should be identical') 
+                            print(user_def[key]) +                            print(value) +                        return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if key not in user_def: +                    if debug: +                        print("user_def does not have key [%s]" % key) +                    return False +                if not isinstance(user_def[key], dict): +                    if debug: +                        print("dict returned false: not instance of dict") +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print("keys are not equal in dict") +                        print(user_values) +                        print(api_values) +                    return False + +                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) +                if not result: +                    if debug: +                        print("dict returned false") +                        print(result) +                    return False + +            # Verify each key, value pair is the same +            else: +                if key not in user_def or value != user_def[key]: +                    if debug: +                        print("value not equal; user_def does not have key") +                        print(key) +                        print(value) +                        if key in user_def: +                            print(user_def[key]) +                    return False + +        if debug: +            print('returning true') +        return True + +class OpenShiftCLIConfig(object): +    '''Generic Config''' +    def __init__(self, rname, namespace, kubeconfig, options): +        
self.kubeconfig = kubeconfig +        self.name = rname +        self.namespace = namespace +        self._options = options + +    @property +    def config_options(self): +        ''' return config options ''' +        return self._options + +    def to_option_list(self, ascommalist=''): +        '''return all options as a string +           if ascommalist is set to the name of a key, and +           the value of that key is a dict, format the dict +           as a list of comma delimited key=value pairs''' +        return self.stringify(ascommalist) + +    def stringify(self, ascommalist=''): +        ''' return the options hash as cli params in a string +            if ascommalist is set to the name of a key, and +            the value of that key is a dict, format the dict +            as a list of comma delimited key=value pairs ''' +        rval = [] +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key] +            if data['include'] \ +               and (data['value'] or isinstance(data['value'], int)): +                if key == ascommalist: +                    val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) +                else: +                    val = data['value'] +                rval.append('--{}={}'.format(key.replace('_', '-'), val)) + +        return rval + + +# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_adm_csr.py -*- -*- -*- + + +class OCcsr(OpenShiftCLI): +    ''' Class to wrap the oc adm certificate command line''' +    kind = 'csr' + +    # pylint: disable=too-many-arguments +    def __init__(self, +                 nodes=None, +                 approve_all=False, +                 service_account=None, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for oc adm certificate ''' +        super(OCcsr, 
self).__init__(None, kubeconfig, verbose) +        self.service_account = service_account +        self.nodes = self.create_nodes(nodes) +        self._csrs = [] +        self.approve_all = approve_all +        self.verbose = verbose + +    @property +    def csrs(self): +        '''property for managing csrs''' +        # any processing needed?? +        self._csrs = self._get(resource=self.kind)['results'][0]['items'] +        return self._csrs + +    def create_nodes(self, nodes): +        '''create a node object to track csr signing status''' +        nodes_list = [] + +        if nodes is None: +            return nodes_list + +        results = self._get(resource='nodes')['results'][0]['items'] + +        for node in nodes: +            nodes_list.append(dict(name=node, csrs={}, accepted=False, denied=False)) + +            for ocnode in results: +                if node in ocnode['metadata']['name']: +                    nodes_list[-1]['accepted'] = True + +        return nodes_list + +    def get(self): +        '''get the current certificate signing requests''' +        return self.csrs + +    @staticmethod +    def action_needed(csr, action): +        '''check to see if csr is in desired state''' +        if csr['status'] == {}: +            return True + +        state = csr['status']['conditions'][0]['type'] + +        if action == 'approve' and state != 'Approved': +            return True + +        elif action == 'deny' and state != 'Denied': +            return True + +        return False + +    def match_node(self, csr): +        '''match an inc csr to a node in self.nodes''' +        for node in self.nodes: +            # we have a match +            if node['name'] in csr['metadata']['name']: +                node['csrs'][csr['metadata']['name']] = csr + +                # check that the username is the node and type is 'Approved' +                if node['name'] in csr['spec']['username'] and csr['status']: +                    if 
csr['status']['conditions'][0]['type'] == 'Approved': +                        node['accepted'] = True +                # check type is 'Denied' and mark node as such +                if csr['status'] and csr['status']['conditions'][0]['type'] == 'Denied': +                    node['denied'] = True + +                return node + +        return None + +    def finished(self): +        '''determine if there are more csrs to sign''' +        # if nodes is set and we have nodes then return if all nodes are 'accepted' +        if self.nodes is not None and len(self.nodes) > 0: +            return all([node['accepted'] or node['denied'] for node in self.nodes]) + +        # we are approving everything or we still have nodes outstanding +        return False + +    def manage(self, action): +        '''run openshift oc adm ca create-server-cert cmd and store results into self.nodes + +           we attempt to verify if the node is one that was given to us to accept. + +           action - (allow | deny) +        ''' + +        results = [] +        # There are 2 types of requests: +        # - node-bootstrapper-client-ip-172-31-51-246-ec2-internal +        #   The client request allows the client to talk to the api/controller +        # - node-bootstrapper-server-ip-172-31-51-246-ec2-internal +        #   The server request allows the server to join the cluster +        # Here we need to determine how to approve/deny +        # we should query the csrs and verify they are from the nodes we thought +        for csr in self.csrs: +            node = self.match_node(csr) +            # oc adm certificate <approve|deny> csr +            # there are 3 known states: Denied, Aprroved, {} +            # verify something is needed by OCcsr.action_needed +            # if approve_all, then do it +            # if you passed in nodes, you must have a node that matches +            if self.approve_all or (node and OCcsr.action_needed(csr, action)): +                result = 
self.openshift_cmd(['certificate', action, csr['metadata']['name']], oadm=True) +                # client should have service account name in username field +                # server should have node name in username field +                if node and csr['metadata']['name'] not in node['csrs']: +                    node['csrs'][csr['metadata']['name']] = csr + +                    # accept node in cluster +                    if node['name'] in csr['spec']['username']: +                        node['accepted'] = True + +                results.append(result) + +        return results + +    @staticmethod +    def run_ansible(params, check_mode=False): +        '''run the idempotent ansible code''' + +        client = OCcsr(params['nodes'], +                       params['approve_all'], +                       params['service_account'], +                       params['kubeconfig'], +                       params['debug']) + +        state = params['state'] + +        api_rval = client.get() + +        if state == 'list': +            return {'changed': False, 'results': api_rval, 'state': state} + +        if state in ['approve', 'deny']: +            if check_mode: +                return {'changed': True, +                        'msg': "CHECK_MODE: Would have {} the certificate.".format(params['state']), +                        'state': state} + +            all_results = [] +            finished = False +            timeout = False +            import time +            # loop for timeout or block until all nodes pass +            ctr = 0 +            while True: + +                all_results.extend(client.manage(params['state'])) +                if client.finished(): +                    finished = True +                    break + +                if params['timeout'] == 0: +                    if not params['approve_all']: +                        ctr = 0 + +                if ctr * 2 > params['timeout']: +                    timeout = True +               
     break + +                # This provides time for the nodes to send their csr requests between approvals +                time.sleep(2) + +                ctr += 1 + +            for result in all_results: +                if result['returncode'] != 0: +                    return {'failed': True, 'msg': all_results} + +            return dict(changed=len(all_results) > 0, +                        results=all_results, +                        nodes=client.nodes, +                        state=state, +                        finished=finished, +                        timeout=timeout) + +        return {'failed': True, +                'msg': 'Unknown state passed. %s' % state} + + +# -*- -*- -*- End included fragment: class/oc_adm_csr.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ansible/oc_adm_csr.py -*- -*- -*- + +def main(): +    ''' +    ansible oc module for approving certificate signing requests +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='approve', type='str', +                       choices=['approve', 'deny', 'list']), +            debug=dict(default=False, type='bool'), +            nodes=dict(default=None, type='list'), +            timeout=dict(default=30, type='int'), +            approve_all=dict(default=False, type='bool'), +            service_account=dict(default='node-bootstrapper', type='str'), +        ), +        supports_check_mode=True, +        mutually_exclusive=[['approve_all', 'nodes']], +    ) + +    if module.params['nodes'] == []: +        module.fail_json(**dict(failed=True, msg='Please specify hosts.')) + +    rval = OCcsr.run_ansible(module.params, module.check_mode) + +    if 'failed' in rval: +        return module.fail_json(**rval) + +    return module.exit_json(**rval) + + +if __name__ == '__main__': +    main() + +# -*- -*- -*- End included fragment: 
ansible/oc_adm_csr.py -*- -*- -*- diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index d6db75e1e..8f8e46e1e 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -1665,9 +1665,6 @@ class OCRoute(OpenShiftCLI):      @staticmethod      def get_cert_data(path, content):          '''get the data for a particular value''' -        if not path and not content: -            return None -          rval = None          if path and os.path.exists(path) and os.access(path, os.R_OK):              rval = open(path).read() @@ -1706,14 +1703,14 @@ class OCRoute(OpenShiftCLI):          if params['tls_termination'] and params['tls_termination'].lower() != 'passthrough':  # E501              for key, option in files.items(): -                if key == 'destcacert' and params['tls_termination'] != 'reencrypt': +                if not option['path'] and not option['content']:                      continue                  option['value'] = OCRoute.get_cert_data(option['path'], option['content'])  # E501                  if not option['value']:                      return {'failed': True, -                            'msg': 'Verify that you pass a value for %s' % key} +                            'msg': 'Verify that you pass a correct value for %s' % key}          rconfig = RouteConfig(params['name'],                                params['namespace'], diff --git a/roles/lib_openshift/src/ansible/oc_adm_csr.py b/roles/lib_openshift/src/ansible/oc_adm_csr.py new file mode 100644 index 000000000..9e43a810b --- /dev/null +++ b/roles/lib_openshift/src/ansible/oc_adm_csr.py @@ -0,0 +1,36 @@ +# pylint: skip-file +# flake8: noqa + +def main(): +    ''' +    ansible oc module for approving certificate signing requests +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            
# pylint: skip-file
# flake8: noqa


class OCcsr(OpenShiftCLI):
    ''' Class to wrap the oc adm certificate command line'''
    kind = 'csr'

    # pylint: disable=too-many-arguments
    def __init__(self,
                 nodes=None,
                 approve_all=False,
                 service_account=None,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for oc adm certificate

            nodes - list of node names whose csrs should be managed
            approve_all - approve every csr found, regardless of node
            service_account - service account expected in the csr username
        '''
        super(OCcsr, self).__init__(None, kubeconfig, verbose)
        self.service_account = service_account
        self.nodes = self.create_nodes(nodes)
        self._csrs = []
        self.approve_all = approve_all
        self.verbose = verbose

    @property
    def csrs(self):
        '''property for managing csrs

           NOTE: this re-queries the API server on every access so callers
           always see fresh csr state.
        '''
        self._csrs = self._get(resource=self.kind)['results'][0]['items']
        return self._csrs

    def create_nodes(self, nodes):
        '''create a node object to track csr signing status'''
        nodes_list = []

        if nodes is None:
            return nodes_list

        results = self._get(resource='nodes')['results'][0]['items']

        for node in nodes:
            nodes_list.append(dict(name=node, csrs={}, accepted=False, denied=False))

            # a node that is already registered with the cluster counts as accepted
            for ocnode in results:
                if node in ocnode['metadata']['name']:
                    nodes_list[-1]['accepted'] = True

        return nodes_list

    def get(self):
        '''get the current certificate signing requests'''
        return self.csrs

    @staticmethod
    def action_needed(csr, action):
        '''check to see if csr is in desired state'''
        # no status, or no conditions yet, means the request is still pending
        # (guards a KeyError when status is non-empty but has no conditions)
        if not csr['status'] or not csr['status'].get('conditions'):
            return True

        state = csr['status']['conditions'][0]['type']

        if action == 'approve' and state != 'Approved':
            return True

        if action == 'deny' and state != 'Denied':
            return True

        return False

    def match_node(self, csr):
        '''match an inc csr to a node in self.nodes'''
        for node in self.nodes:
            # we have a match
            if node['name'] in csr['metadata']['name']:
                node['csrs'][csr['metadata']['name']] = csr

                # guard: a csr may not have its conditions populated yet
                conditions = csr['status'].get('conditions') if csr['status'] else []
                state = conditions[0]['type'] if conditions else None

                # check that the username is the node and type is 'Approved'
                if node['name'] in csr['spec']['username'] and state == 'Approved':
                    node['accepted'] = True
                # check type is 'Denied' and mark node as such
                if state == 'Denied':
                    node['denied'] = True

                return node

        return None

    def finished(self):
        '''determine if there are more csrs to sign'''
        # if nodes is set and we have nodes then return if all nodes are 'accepted'
        if self.nodes is not None and len(self.nodes) > 0:
            return all([node['accepted'] or node['denied'] for node in self.nodes])

        # we are approving everything or we still have nodes outstanding
        return False

    def manage(self, action):
        '''run openshift oc adm certificate <action> and store results into self.nodes

           we attempt to verify if the node is one that was given to us to accept.

           action - (approve | deny)
        '''

        results = []
        # There are 2 types of requests:
        # - node-bootstrapper-client-ip-172-31-51-246-ec2-internal
        #   The client request allows the client to talk to the api/controller
        # - node-bootstrapper-server-ip-172-31-51-246-ec2-internal
        #   The server request allows the server to join the cluster
        # Here we need to determine how to approve/deny
        # we should query the csrs and verify they are from the nodes we thought
        for csr in self.csrs:
            node = self.match_node(csr)
            # oc adm certificate <approve|deny> csr
            # there are 3 known states: Denied, Approved, {}
            # verify something is needed by OCcsr.action_needed
            # if approve_all, then do it
            # if you passed in nodes, you must have a node that matches
            if self.approve_all or (node and OCcsr.action_needed(csr, action)):
                result = self.openshift_cmd(['certificate', action, csr['metadata']['name']], oadm=True)
                # client should have service account name in username field
                # server should have node name in username field
                if node and csr['metadata']['name'] not in node['csrs']:
                    node['csrs'][csr['metadata']['name']] = csr

                    # accept node in cluster
                    if node['name'] in csr['spec']['username']:
                        node['accepted'] = True

                results.append(result)

        return results

    @staticmethod
    def run_ansible(params, check_mode=False):
        '''run the idempotent ansible code'''
        # local import kept (module is assembled from fragments), but hoisted
        # to the top of the function instead of sitting mid-loop-setup
        import time

        client = OCcsr(params['nodes'],
                       params['approve_all'],
                       params['service_account'],
                       params['kubeconfig'],
                       params['debug'])

        state = params['state']

        api_rval = client.get()

        if state == 'list':
            return {'changed': False, 'results': api_rval, 'state': state}

        if state in ['approve', 'deny']:
            if check_mode:
                return {'changed': True,
                        'msg': "CHECK_MODE: Would have {} the certificate.".format(params['state']),
                        'state': state}

            all_results = []
            finished = False
            timeout = False
            # loop for timeout or block until all nodes pass
            ctr = 0
            while True:

                all_results.extend(client.manage(params['state']))
                if client.finished():
                    finished = True
                    break

                # timeout of 0 with explicit nodes means block until done
                if params['timeout'] == 0:
                    if not params['approve_all']:
                        ctr = 0

                if ctr * 2 > params['timeout']:
                    timeout = True
                    break

                # This provides time for the nodes to send their csr requests between approvals
                time.sleep(2)

                ctr += 1

            for result in all_results:
                if result['returncode'] != 0:
                    return {'failed': True, 'msg': all_results}

            return dict(changed=len(all_results) > 0,
                        results=all_results,
                        nodes=client.nodes,
                        state=state,
                        finished=finished,
                        timeout=timeout)

        return {'failed': True,
                'msg': 'Unknown state passed. %s' % state}
DOCUMENTATION = '''
---
module: oc_adm_csr
short_description: Module to approve or deny openshift certificate signing requests
description:
  - Wrapper around the openshift `oc adm certificate approve|deny <csr>` command.
options:
  state:
    description:
    - approve, deny, or list the certificate signing requests
    required: false
    default: approve
    choices:
    - approve
    - deny
    - list
    aliases: []
  kubeconfig:
    description:
    - The path for the kubeconfig file to use for authentication
    required: false
    default: /etc/origin/master/admin.kubeconfig
    aliases: []
  debug:
    description:
    - Turn on debug output.
    required: false
    default: False
    aliases: []
  nodes:
    description:
    - A list of the names of the nodes in which to accept the certificates
    required: false
    default: None
    aliases: []
  timeout:
    description:
    - This flag allows for a timeout value when approving nodes.
    - A zero value for the timeout will block until the nodes have been accepted
    required: false
    default: 30
    aliases: []
  approve_all:
    description:
    - This flag allows for the module to approve all CSRs that are found.
    - This facilitates testing.
    required: false
    default: False
    aliases: []
  service_account:
    description:
    - This parameter tells the approval process which service account is being used for the requests
    required: false
    default: node-bootstrapper
    aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''

EXAMPLES = '''
- name: Approve certificates for node xyz
  oc_adm_csr:
    nodes:
    - xyz
    timeout: 300

- name: Approve certificates for node xyz and block until accepted
  oc_adm_csr:
    nodes:
    - xyz
    timeout: 0
'''
#!/usr/bin/python
# pylint: skip-file
# flake8: noqa
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
     - Allows for the management of server certificates
version_added: "2.0"
options:
  name:
    description:
      - Name of certificate to add, update or remove.
    required: true
  new_name:
    description:
      - When state is present, this will update the name of the cert.
      - The cert, key and cert_chain parameters will be ignored if this is defined.
  new_path:
    description:
      - When state is present, this will update the path of the cert.
      - The cert, key and cert_chain parameters will be ignored if this is defined.
  state:
    description:
      - Whether to create (or update) or delete the certificate.
      - If new_path or new_name is defined, specifying present will attempt to update these.
    required: true
    choices: [ "present", "absent" ]
  path:
    description:
      - When creating or updating, specify the desired path of the certificate.
    default: "/"
  cert_chain:
    description:
      - The path to, or content of the CA certificate chain in PEM encoded format.
        As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
  cert:
    description:
      - The path to, or content of the certificate body in PEM encoded format.
        As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
  key:
    description:
      - The path to, or content of the private key in PEM encoded format.
        As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
  dup_ok:
    description:
      - By default the module will not upload a certificate that is already uploaded into AWS.
        If set to True, it will upload the certificate as long as the name is unique.
    default: False


requirements: [ "boto" ]
author: Jonathan I. Davila
extends_documentation_fragment:
    - aws
    - ec2
'''

EXAMPLES = '''
# Basic server certificate upload from local file
- iam_cert:
    name: very_ssl
    state: present
    cert: "{{ lookup('file', 'path/to/cert') }}"
    key: "{{ lookup('file', 'path/to/key') }}"
    cert_chain: "{{ lookup('file', 'path/to/certchain') }}"

# Basic server certificate upload
- iam_cert:
    name: very_ssl
    state: present
    cert: path/to/cert
    key: path/to/key
    cert_chain: path/to/certchain

# Server certificate upload using key string
- iam_cert:
    name: very_ssl
    state: present
    path: "/a/cert/path/"
    cert: body_of_somecert
    key: vault_body_of_privcertkey
    cert_chain: body_of_myverytrustedchain

# Basic rename of existing certificate
- iam_cert:
    name: very_ssl
    new_name: new_very_ssl
    state: present

'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, connect_to_aws
import os

try:
    import boto
    import boto.iam
    import boto.ec2
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def boto_exception(err):
    '''generic error message handler

       Prefer the API-supplied message attributes; otherwise fall back to
       the actual exception class name plus its text.
    '''
    if hasattr(err, 'error_message'):
        error = err.error_message
    elif hasattr(err, 'message'):
        error = err.message
    else:
        # use the real exception type -- the previous code formatted the
        # literal `Exception` class, which made errors unidentifiable
        error = '%s: %s' % (err.__class__.__name__, err)

    return error


def cert_meta(iam, name):
    '''Return (path, body, id, upload_date, expiration, arn) for the named
       IAM server certificate.'''
    certificate = iam.get_server_certificate(name).get_server_certificate_result.server_certificate
    ocert = certificate.certificate_body
    opath = certificate.server_certificate_metadata.path
    ocert_id = certificate.server_certificate_metadata.server_certificate_id
    upload_date = certificate.server_certificate_metadata.upload_date
    exp = certificate.server_certificate_metadata.expiration
    arn = certificate.server_certificate_metadata.arn
    return opath, ocert, ocert_id, upload_date, exp, arn
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
    '''Return True when the named certificate already exists with a matching body.

       Fails the module when the name is taken by a different certificate body,
       or (unless dup_ok) when the same body already exists under another name.
    '''
    update = False

    # IAM cert names are case insensitive
    names_lower = [n.lower() for n in [name, new_name] if n is not None]
    orig_cert_names_lower = [ocn.lower() for ocn in orig_cert_names]

    if any(ct in orig_cert_names_lower for ct in names_lower):
        for i_name in names_lower:
            if cert is not None:
                try:
                    c_index = orig_cert_names_lower.index(i_name)
                except ValueError:
                    # BUGFIX: list.index raises ValueError (not NameError)
                    # when the name is absent -- skip to the next candidate
                    continue
                else:
                    # NOTE: remove the carriage return to strictly compare the cert bodies.
                    slug_cert = cert.replace('\r', '')
                    slug_orig_cert_bodies = orig_cert_bodies[c_index].replace('\r', '')
                    if slug_orig_cert_bodies == slug_cert:
                        update = True
                        break
                    elif slug_cert.startswith(slug_orig_cert_bodies):
                        update = True
                        break
                    else:
                        module.fail_json(changed=False, msg='A cert with the name %s already exists and'
                                         ' has a different certificate body associated'
                                         ' with it. Certificates cannot have the same name' % orig_cert_names[c_index])
            else:
                update = True
                break
    elif cert in orig_cert_bodies and not dup_ok:
        for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
            if crt_body == cert:
                module.fail_json(changed=False, msg='This certificate already'
                                                    ' exists under the name %s' % crt_name)

    return update


def cert_action(module, iam, name, cpath, new_name, new_path, state,
                cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok):
    '''Create, rename/move, or delete the server certificate and exit the
       module with the result.'''
    if state == 'present':
        update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
                           orig_cert_bodies, dup_ok)
        if update:
            opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
            changed = True
            if new_name and new_path:
                iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
                module.exit_json(changed=changed, original_name=name, new_name=new_name,
                                 original_path=opath, new_path=new_path, cert_body=ocert,
                                 upload_date=upload_date, expiration_date=exp, arn=arn)
            elif new_name and not new_path:
                iam.update_server_cert(name, new_cert_name=new_name)
                module.exit_json(changed=changed, original_name=name, new_name=new_name,
                                 cert_path=opath, cert_body=ocert,
                                 upload_date=upload_date, expiration_date=exp, arn=arn)
            elif not new_name and new_path:
                iam.update_server_cert(name, new_path=new_path)
                module.exit_json(changed=changed, name=new_name,
                                 original_path=opath, new_path=new_path, cert_body=ocert,
                                 upload_date=upload_date, expiration_date=exp, arn=arn)
            else:
                changed = False
                module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
                                 upload_date=upload_date, expiration_date=exp, arn=arn,
                                 msg='No new path or name specified. No changes made')
        else:
            changed = True
            iam.upload_server_cert(name, cert, key, cert_chain=cert_chain, path=cpath)
            opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
            module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
                             upload_date=upload_date, expiration_date=exp, arn=arn)
    elif state == 'absent':
        if name in orig_cert_names:
            changed = True
            iam.delete_server_cert(name)
            module.exit_json(changed=changed, deleted_cert=name)
        else:
            changed = False
            module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)


def load_data(cert, key, cert_chain):
    '''If cert/key/cert_chain is a path to an existing file, read the file
       and return its contents; otherwise the value is passed through
       untouched (assumed to already be PEM content).'''
    # context managers so file handles are always closed (the previous
    # bare open().read() calls leaked handles)
    if cert and os.path.isfile(cert):
        with open(cert, 'r') as cert_fh:
            cert = cert_fh.read().rstrip()
    if key and os.path.isfile(key):
        with open(key, 'r') as key_fh:
            key = key_fh.read().rstrip()
    if cert_chain and os.path.isfile(cert_chain):
        # NOTE: the chain is deliberately not rstripped (matches prior behavior)
        with open(cert_chain, 'r') as chain_fh:
            cert_chain = chain_fh.read()
    return cert, key, cert_chain
def main():
    '''Entry point: manage an IAM server certificate (upload, rename/move,
       or delete) according to the module parameters.'''
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(),
        cert=dict(),
        key=dict(no_log=True),
        cert_chain=dict(),
        new_name=dict(),
        path=dict(default='/'),
        new_path=dict(),
        dup_ok=dict(type='bool')
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['new_path', 'key'],
            ['new_path', 'cert'],
            ['new_path', 'cert_chain'],
            ['new_name', 'key'],
            ['new_name', 'cert'],
            ['new_name', 'cert_chain'],
        ],
    )

    if not HAS_BOTO:
        module.fail_json(msg="Boto is required for this module")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    name = module.params.get('name')
    path = module.params.get('path')
    new_name = module.params.get('new_name')
    new_path = module.params.get('new_path')
    dup_ok = module.params.get('dup_ok')
    # cert material is only needed for a create/update that isn't a pure rename/move
    if state == 'present' and not new_name and not new_path:
        cert, key, cert_chain = load_data(cert=module.params.get('cert'),
                                          key=module.params.get('key'),
                                          cert_chain=module.params.get('cert_chain'))
    else:
        cert = key = cert_chain = None

    orig_cert_names = [ctb['server_certificate_name'] for ctb in
                       iam.get_all_server_certs().list_server_certificates_result.server_certificate_metadata_list]
    orig_cert_bodies = [iam.get_server_certificate(thing).get_server_certificate_result.certificate_body
                        for thing in orig_cert_names]
    # a no-op rename/move is treated as "no change requested"
    if new_name == name:
        new_name = None
    if new_path == path:
        new_path = None

    changed = False
    try:
        cert_action(module, iam, name, path, new_name, new_path, state,
                    cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok)
    except boto.exception.BotoServerError as err:
        # SECURITY: do not echo the private key back in failure output --
        # `key` is declared no_log and must not leak through the debug field
        module.fail_json(changed=changed, msg=str(err), debug=[cert])


if __name__ == '__main__':
    main()
# Jenkins environment doesn't have all the required libraries
# pylint: disable=import-error
import time
# Ansible modules need this wildcard import
# pylint: disable=unused-wildcard-import, wildcard-import, redefined-builtin
from ansible.module_utils.basic import AnsibleModule

try:
    import boto3
    HAS_BOTO3 = True
except ImportError:
    # fail cleanly from main() instead of a raw traceback at import time
    HAS_BOTO3 = False

AWS_ALIAS_URL = "http://docs.aws.amazon.com/kms/latest/developerguide/programming-aliases.html"


class AwsIamKms(object):
    '''
    ansible module for AWS IAM KMS
    '''

    def __init__(self):
        ''' constructor '''
        self.module = None
        self.kms_client = None
        self.aliases = None

    @staticmethod
    def valid_alias_name(user_alias):
        ''' AWS KMS aliases must start with 'alias/' '''
        return user_alias.startswith('alias/')

    def get_all_kms_info(self):
        '''fetch all kms info and return them

        list_keys doesn't have information regarding aliases
        list_aliases doesn't have the full kms arn

        fetch both and join them on the targetKeyId
        '''
        aliases = self.kms_client.list_aliases()['Aliases']
        keys = self.kms_client.list_keys()['Keys']

        # index keys by id once instead of scanning the key list per alias
        keys_by_id = dict((key['KeyId'], key) for key in keys if 'KeyId' in key)

        for alias in aliases:
            target = alias.get('TargetKeyId')
            if target in keys_by_id:
                alias.update(keys_by_id[target])

        return aliases

    def get_kms_entry(self, user_alias, alias_list):
        ''' return single alias details from list of aliases

            exits the module with a failure when the alias is not found
        '''
        for alias in alias_list:
            if user_alias == alias.get('AliasName', False):
                return alias

        msg = "Did not find alias {}".format(user_alias)
        self.module.exit_json(failed=True, results=msg)

    @staticmethod
    def exists(user_alias, alias_list):
        ''' Check if KMS alias already exists '''
        for alias in alias_list:
            if user_alias == alias.get('AliasName'):
                return True

        return False

    def main(self):
        ''' entry point for module '''

        self.module = AnsibleModule(
            argument_spec=dict(
                state=dict(default='list', choices=['list', 'present'], type='str'),
                region=dict(default=None, required=True, type='str'),
                alias=dict(default=None, type='str'),
                # description default cannot be None
                description=dict(default='', type='str'),
                aws_access_key=dict(default=None, type='str'),
                aws_secret_key=dict(default=None, type='str'),
            ),
        )

        # consistent with the sibling iam modules: report a clean error
        # instead of crashing at import when boto3 is missing
        if not HAS_BOTO3:
            self.module.fail_json(msg='boto3 is required for this module')

        state = self.module.params['state']
        aws_access_key = self.module.params['aws_access_key']
        aws_secret_key = self.module.params['aws_secret_key']
        if aws_access_key and aws_secret_key:
            boto3.setup_default_session(aws_access_key_id=aws_access_key,
                                        aws_secret_access_key=aws_secret_key,
                                        region_name=self.module.params['region'])
        else:
            boto3.setup_default_session(region_name=self.module.params['region'])

        self.kms_client = boto3.client('kms')

        aliases = self.get_all_kms_info()

        if state == 'list':
            if self.module.params['alias'] is not None:
                user_kms = self.get_kms_entry(self.module.params['alias'],
                                              aliases)
                self.module.exit_json(changed=False, results=user_kms,
                                      state="list")
            else:
                self.module.exit_json(changed=False, results=aliases,
                                      state="list")

        if state == 'present':

            # early sanity check to make sure the alias name conforms with
            # AWS alias name requirements
            if not self.valid_alias_name(self.module.params['alias']):
                self.module.exit_json(failed=True, changed=False,
                                      results="Alias must start with the prefix " +
                                      "'alias/'. Please see " + AWS_ALIAS_URL,
                                      state='present')

            if not self.exists(self.module.params['alias'], aliases):
                # if we didn't find it, create it
                response = self.kms_client.create_key(KeyUsage='ENCRYPT_DECRYPT',
                                                      Description=self.module.params['description'])
                kid = response['KeyMetadata']['KeyId']
                response = self.kms_client.create_alias(AliasName=self.module.params['alias'],
                                                        TargetKeyId=kid)
                # sleep for a bit so that the KMS data can be queried
                time.sleep(10)
                # get details for newly created KMS entry
                new_alias_list = self.kms_client.list_aliases()['Aliases']
                user_kms = self.get_kms_entry(self.module.params['alias'],
                                              new_alias_list)

                self.module.exit_json(changed=True, results=user_kms,
                                      state='present')

            # already exists, normally we would check whether we need to update it
            # but this module isn't written to allow changing the alias name
            # or changing whether the key is enabled/disabled
            user_kms = self.get_kms_entry(self.module.params['alias'], aliases)
            self.module.exit_json(changed=False, results=user_kms,
                                  state="present")

        self.module.exit_json(failed=True,
                              changed=False,
                              results='Unknown state passed. %s' % state,
                              state="unknown")


if __name__ == '__main__':
    AwsIamKms().main()
---
# On Atomic hosts the CNI directories live under /var, so repoint the
# role defaults before creating anything.
- name: Set the Nuage plugin openshift directory fact to handle Atomic host install
  set_fact:
    nuage_node_plugin_dir: /var/usr/share/vsp-openshift
  when: openshift.common.is_atomic | bool

- name: Set the Nuage CNI network config directory fact to handle Atomic host install
  set_fact:
    nuage_node_cni_netconf_dir: /var/etc/cni/net.d/
  when: openshift.common.is_atomic | bool

- name: Set the Nuage CNI binary directory fact to handle Atomic host install
  set_fact:
    nuage_node_cni_bin_dir: /var/opt/cni/bin/
  when: openshift.common.is_atomic | bool

# The daemon sets mount these paths, so they must exist beforehand.
- name: Assure CNI plugin config dir exists before daemon set install
  become: yes
  file:
    path: "{{ nuage_node_plugin_dir }}"
    state: directory

- name: Assure CNI netconf directory exists before daemon set install
  become: yes
  file:
    path: "{{ nuage_node_cni_netconf_dir }}"
    state: directory

- name: Assure CNI plugin binary directory exists before daemon set install
  become: yes
  file:
    path: "{{ nuage_node_cni_bin_dir }}"
    state: directory
/var/usr/share/nuage-openshift-monitor/ +  when: openshift.common.is_atomic | bool + +- name: Set the Nuage master config directory for daemon sets install +  set_fact: +    nuage_master_config_dsets_mount_dir: /var/usr/share/ +  when: master_host_type == "is_atomic" + +- name: Set the Nuage node config directory for daemon sets install +  set_fact: +    nuage_node_config_dsets_mount_dir: /var/usr/share/ +  when: slave_host_type == "is_atomic" + +- name: Set the Nuage CNI plugin binary directory for daemon sets install +  set_fact: +    nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin +  when: openshift.common.is_atomic | bool +  - name: Create directory /usr/share/nuage-openshift-monitor    become: yes    file: path=/usr/share/nuage-openshift-monitor state=directory +  when: not openshift.common.is_atomic | bool -- name: Create the log directory +- name: Create directory /var/usr/share/nuage-openshift-monitor    become: yes -  file: path={{ nuage_mon_rest_server_logdir }} state=directory +  file: path=/var/usr/share/nuage-openshift-monitor state=directory +  when: openshift.common.is_atomic | bool + +- name: Create directory /var/usr/bin for monitor binary on atomic +  become: yes +  file: path=/var/usr/bin state=directory +  when: openshift.common.is_atomic | bool -- name: Install Nuage Openshift Monitor +- name: Create CNI bin directory /var/opt/cni/bin    become: yes -  yum: name={{ nuage_openshift_rpm }} state=present +  file: path=/var/opt/cni/bin state=directory +  when: openshift.common.is_atomic | bool + +- name: Create the log directory +  become: yes +  file: path={{ nuage_mon_rest_server_logdir }} state=directory  - include: serviceaccount.yml @@ -45,10 +92,32 @@    become: yes    copy: src="{{ vsd_user_key_file }}" dest="{{ cert_output_dir }}/{{ vsd_user_key_file | basename }}" -- name: Create nuage-openshift-monitor.yaml +- name: Create Nuage master daemon set yaml file +  become: yes +  template: src=nuage-master-config-daemonset.j2 
dest=/etc/nuage-master-config-daemonset.yaml owner=root mode=0644 + +- name: Create Nuage node daemon set yaml file    become: yes -  template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644 +  template: src=nuage-node-config-daemonset.j2 dest=/etc/nuage-node-config-daemonset.yaml owner=root mode=0644 + +- name: Add the service account to the privileged scc to have root permissions +  shell: oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-infra:daemonset-controller +  ignore_errors: true +  when: inventory_hostname == groups.oo_first_master.0 + +- name: Spawn Nuage Master monitor daemon sets pod +  shell: oc create -f /etc/nuage-master-config-daemonset.yaml +  ignore_errors: true +  when: inventory_hostname == groups.oo_first_master.0 + +- name: Spawn Nuage CNI daemon sets pod +  shell: oc create -f /etc/nuage-node-config-daemonset.yaml +  ignore_errors: true +  when: inventory_hostname == groups.oo_first_master.0 + +- name: Restart daemons +  command: /bin/true    notify:      - restart master api      - restart master controllers -    - restart nuage-openshift-monitor +  ignore_errors: true diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 new file mode 100755 index 000000000..612d689c2 --- /dev/null +++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 @@ -0,0 +1,111 @@ +# This ConfigMap is used to configure Nuage VSP configuration on master nodes +kind: ConfigMap +apiVersion: v1 +metadata: +  name: nuage-master-config +  namespace: kube-system +data: +  # This will generate the required Nuage configuration +  # on master nodes +  monitor_yaml_config: | + +      # .kubeconfig that includes the nuage service account +      kubeConfig: {{ nuage_master_crt_dir }}/nuage.kubeconfig +      # name of the nuage service account, or another account with 'cluster-reader' +   
   # permissions +      # Openshift master config file +      masterConfig: /etc/origin/master/master-config.yaml +      # URL of the VSD Architect +      vsdApiUrl: {{ vsd_api_url }} +      # API version to query against.  Usually "v3_2" +      vspVersion: {{ vsp_version }} +      # Name of the enterprise in which pods will reside +      enterpriseName: {{ enterprise }} +      # Name of the domain in which pods will reside +      domainName: {{ domain }} +      # VSD generated user certificate file location on master node +      userCertificateFile: {{ nuage_master_crt_dir }}/{{ vsd_user }}.pem +      # VSD generated user key file location on master node +      userKeyFile: {{ nuage_master_crt_dir }}/{{ vsd_user }}-Key.pem +      # Location where logs should be saved +      log_dir: /var/log/nuage-openshift-monitor +      # Monitor rest server parameters +      # Logging level for the nuage openshift monitor +      # allowed options are: 0 => INFO, 1 => WARNING, 2 => ERROR, 3 => FATAL +      logLevel: 0 +      # Parameters related to the nuage monitor REST server +      nuageMonServer: +          URL: 0.0.0.0:9443 +          certificateDirectory: {{ nuage_master_crt_dir }} +      # etcd config required for HA +      etcdClientConfig: +          ca: {{ nuage_master_crt_dir }}/nuageMonCA.crt +          certFile: {{ nuage_master_crt_dir }}/nuageMonServer.crt +          keyFile: {{ nuage_master_crt_dir }}/master.etcd-client.key +          urls: +      {% for etcd_url in openshift.master.etcd_urls %} +              - {{ etcd_url }} +      {% endfor %} + +--- + +# This manifest installs Nuage master node configuration on +# each Nuage master node in a cluster. 
+kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: +  name: nuage-master-config +  namespace: kube-system +  labels: +    k8s-app: nuage-master-config +spec: +  selector: +    matchLabels: +      k8s-app: nuage-master-config +  template: +    metadata: +      labels: +        k8s-app: nuage-master-config +    spec: +      hostNetwork: true +      tolerations: +        - key: node-role.kubernetes.io/master +          effect: NoSchedule +          operator: Exists +      nodeSelector: +        install-monitor: "true" +      containers: +        # This container configures Nuage Master node +        - name: install-nuage-master-config +          image: nuage/master:{{ nuage_monitor_container_image_version }} +          ports: +            - containerPort: 9443 +              hostPort: 9443 +          command: ["/configure-master.sh"] +          args: ["ose", "{{ master_host_type }}"] +          securityContext: +            privileged: true +          env: +            # nuage-openshift-monitor.yaml config to install on each slave node. 
+            - name: NUAGE_MASTER_VSP_CONFIG +              valueFrom: +                configMapKeyRef: +                  name: nuage-master-config +                  key: monitor_yaml_config +          volumeMounts: +            - mountPath: /var/log +              name: cni-log-dir +            - mountPath: {{ nuage_master_config_dsets_mount_dir }} +              name: usr-share-dir +            - mountPath: /etc/origin/ +              name: master-config-dir +      volumes: +        - name: cni-log-dir +          hostPath: +            path: /var/log +        - name: usr-share-dir +          hostPath: +            path: {{ nuage_master_config_dsets_mount_dir }} +        - name: master-config-dir +          hostPath: +            path: /etc/origin/ diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 new file mode 100755 index 000000000..02e9a1563 --- /dev/null +++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 @@ -0,0 +1,206 @@ +# This ConfigMap is used to configure Nuage VSP configuration +kind: ConfigMap +apiVersion: v1 +metadata: +  name: nuage-config +  namespace: kube-system +data: +  # This will generate the required Nuage vsp-openshift.yaml +  # config on each slave node +  plugin_yaml_config: | +      clientCert: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/client.crt +      # The key to the certificate in clientCert above +      clientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/client.key +      # The certificate authority's certificate for the local kubelet.  Usually the +      # same as the CA cert used to create the client Cert/Key pair. 
+      CACert: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/ca.crt +      # Name of the enterprise in which pods will reside +      enterpriseName: {{ enterprise }} +      # Name of the domain in which pods will reside +      domainName: {{ domain }} +      # Name of the VSD user in admin group +      vsdUser: {{ vsd_user }} +      # IP address and port number of master API server +      masterApiServer: {{ api_server_url }} +      # REST server URL  +      nuageMonRestServer: {{ nuage_mon_rest_server_url }} +      # Bridge name for the docker bridge +      dockerBridgeName: docker0 +      # Certificate for connecting to the openshift monitor REST api +      nuageMonClientCert: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.crt +      # Key to the certificate in restClientCert +      nuageMonClientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.key +      # CA certificate for verifying the master's rest server +      nuageMonServerCA: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonCA.crt +      # Nuage vport mtu size +      interfaceMTU: {{ nuage_vport_mtu  }} +      # Logging level for the plugin +      # allowed options are: "dbg", "info", "warn", "err", "emer", "off" +      logLevel: 3 + +  # This will generate the required Nuage CNI yaml configuration +  cni_yaml_config: | +      vrsendpoint: "/var/run/openvswitch/db.sock" +      vrsbridge: "alubr0" +      monitorinterval: 60 +      cniversion: 0.2.0 +      loglevel: "info" +      portresolvetimer: 60 +      logfilesize: 1 +      vrsconnectionchecktimer: 180 +      mtu: 1450 +      staleentrytimeout: 600 + +--- + +# This manifest installs Nuage CNI plugins and network config on +# each worker node in Openshift cluster +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: +  name: nuage-cni-ds +  namespace: kube-system +  labels: +    k8s-app: nuage-cni-ds +spec: +  selector: +    matchLabels: +      k8s-app: nuage-cni-ds +  template: 
+    metadata: +      labels: +        k8s-app: nuage-cni-ds +    spec: +      hostNetwork: true +      tolerations: +        - key: node-role.kubernetes.io/master +          effect: NoSchedule +          operator: Exists +      containers: +        # This container installs Nuage CNI binaries +        # and CNI network config file on each node. +        - name: install-nuage-cni +          image: nuage/cni:{{ nuage_cni_container_image_version }} +          command: ["/install-cni.sh"] +          args: ["nuage-cni-openshift", "{{ slave_host_type }}"] +          securityContext: +            privileged: true +          env: +            # Nuage vsp-openshift.yaml config to install on each slave node. +            - name: NUAGE_VSP_CONFIG +              valueFrom: +                configMapKeyRef: +                  name: nuage-config +                  key: plugin_yaml_config +            # Nuage nuage-cni.yaml config to install on each slave node. +            - name: NUAGE_CNI_YAML_CONFIG +              valueFrom: +                configMapKeyRef: +                  name: nuage-config +                  key: cni_yaml_config +            # Nuage cluster network CIDR for iptables configuration +            - name: NUAGE_CLUSTER_NW_CIDR +              value: "{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}" +          volumeMounts: +            - mountPath: /host/opt/cni/bin +              name: cni-bin-dir +            - mountPath: /host/etc/cni/net.d +              name: cni-net-dir +            - mountPath: /etc/default +              name: cni-yaml-dir +            - mountPath: /var/run +              name: var-run-dir +            - mountPath: /var/log +              name: cni-log-dir +            - mountPath: {{ nuage_node_config_dsets_mount_dir }} +              name: usr-share-dir +      volumes: +        - name: cni-bin-dir +          hostPath: +            path: {{ nuage_cni_bin_dsets_mount_dir }} +        - name: 
cni-net-dir +          hostPath: +            path: {{ nuage_cni_netconf_dsets_mount_dir }} +        - name: cni-yaml-dir +          hostPath: +            path: /etc/default +        - name: var-run-dir +          hostPath: +            path: /var/run +        - name: cni-log-dir +          hostPath: +            path: /var/log +        - name: usr-share-dir +          hostPath: +            path: {{ nuage_node_config_dsets_mount_dir }} + +--- + +# This manifest installs Nuage VRS on +# each worker node in an Openshift cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: +  name: nuage-vrs-ds +  namespace: kube-system +  labels: +    k8s-app: nuage-vrs-ds +spec: +  selector: +    matchLabels: +      k8s-app: nuage-vrs-ds +  updateStrategy: +    type: RollingUpdate +  template: +    metadata: +      labels: +        k8s-app: nuage-vrs-ds +    spec: +      hostNetwork: true +      tolerations: +        - key: node-role.kubernetes.io/master +          effect: NoSchedule +          operator: Exists +      containers: +        # This container installs Nuage VRS running as a +        # container on each worker node +        - name: install-nuage-vrs +          image: nuage/vrs:{{ nuage_vrs_container_image_version }} +          securityContext: +            privileged: true +          env: +            # Configure parameters for VRS openvswitch file +            - name: NUAGE_ACTIVE_CONTROLLER +              value: "{{ vsc_active_ip }}" +            - name: NUAGE_STANDBY_CONTROLLER +              value: "{{ vsc_standby_ip }}" +            - name: NUAGE_PLATFORM +              value: '"kvm, k8s"' +            - name: NUAGE_K8S_SERVICE_IPV4_SUBNET +              value: '192.168.0.0\/16' +            - name: NUAGE_NETWORK_UPLINK_INTF +              value: "eth0" +          volumeMounts: +            - mountPath: /var/run +              name: vrs-run-dir +            - mountPath: /var/log +              name: vrs-log-dir +            - mountPath: /sys/module 
+              name: sys-mod-dir +              readOnly: true +            - mountPath: /lib/modules +              name: lib-mod-dir +              readOnly: true +      volumes: +        - name: vrs-run-dir +          hostPath: +            path: /var/run +        - name: vrs-log-dir +          hostPath: +            path: /var/log +        - name: sys-mod-dir +          hostPath: +            path: /sys/module +        - name: lib-mod-dir +          hostPath: +            path: /lib/modules diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2 deleted file mode 100644 index e077128a4..000000000 --- a/roles/nuage_master/templates/nuage-openshift-monitor.j2 +++ /dev/null @@ -1,41 +0,0 @@ -# .kubeconfig that includes the nuage service account -kubeConfig: {{ kube_config }} -# name of the nuage service account, or another account with 'cluster-reader' -# permissions -# Openshift master config file -masterConfig: {{ master_config_yaml }}  -# URL of the VSD Architect -vsdApiUrl: {{ vsd_api_url }}  -# API version to query against.  Usually "v3_2" -vspVersion: {{ vsp_version }}  -# File containing a VSP license to install.  
Only necessary if no license has -# been installed on the VSD Architect before, only valid for standalone vsd install -# licenseFile: "/path/to/base_vsp_license.txt" -# Name of the enterprise in which pods will reside -enterpriseName: {{ enterprise }}  -# Name of the domain in which pods will reside -domainName: {{ domain }} -# VSD generated user certificate file location on master node -userCertificateFile: {{ cert_output_dir }}/{{ vsd_user_cert_file | basename }} -# VSD generated user key file location on master node -userKeyFile: {{ cert_output_dir }}/{{ vsd_user_key_file | basename }} -# Location where logs should be saved -log_dir: {{ nuage_mon_rest_server_logdir }} -# Monitor rest server parameters -# Logging level for the nuage openshift monitor -# allowed options are: 0 => INFO, 1 => WARNING, 2 => ERROR, 3 => FATAL -logLevel: {{ nuage_mon_log_level }} -# Parameters related to the nuage monitor REST server -nuageMonServer: -    URL: {{ nuage_mon_rest_server_url }} -    certificateDirectory: {{ cert_output_dir }} -# etcd config required for HA -etcdClientConfig: -    ca: {{ openshift_master_config_dir }}/{{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }} -    certFile: {{ openshift_master_config_dir }}/master.etcd-client.crt -    keyFile: {{ openshift_master_config_dir }}/master.etcd-client.key -    urls: -{% for etcd_url in openshift.master.etcd_urls %} -        - {{ etcd_url }} -{% endfor %} - diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml index 57d5d2595..114514d7c 100644 --- a/roles/nuage_master/vars/main.yaml +++ b/roles/nuage_master/vars/main.yaml @@ -22,6 +22,18 @@ nuage_mon_rest_server_host: "{{ openshift.master.cluster_hostname | default(open  nuage_master_crt_dir: /usr/share/nuage-openshift-monitor  nuage_service_account: system:serviceaccount:default:nuage +nuage_master_config_dsets_mount_dir: /usr/share/ +nuage_node_config_dsets_mount_dir: /usr/share/ 
+nuage_cni_bin_dsets_mount_dir: /opt/cni/bin +nuage_cni_netconf_dsets_mount_dir: /etc/cni/net.d +nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.1.1') }}" +nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.1.1') }}" +nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.1.1') }}" +api_server_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" +nuage_vport_mtu: "{{ nuage_interface_mtu | default('1460') }}" +master_host_type: "{{ master_base_host_type | default('is_rhel_server') }}" +slave_host_type: "{{ slave_base_host_type | default('is_rhel_server') }}" +  nuage_tasks:  - resource_kind: cluster-role    resource_name: cluster-reader diff --git a/roles/nuage_node/defaults/main.yml b/roles/nuage_node/defaults/main.yml index b3d2e3cec..9a2e34387 100644 --- a/roles/nuage_node/defaults/main.yml +++ b/roles/nuage_node/defaults/main.yml @@ -1,6 +1,6 @@  --- -r_nuage_node_firewall_enabled: True -r_nuage_node_use_firewalld: False +r_nuage_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_nuage_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  nuage_mon_rest_server_port: '9443' diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml index 8384856ff..e68ae74bd 100644 --- a/roles/nuage_node/handlers/main.yaml +++ b/roles/nuage_node/handlers/main.yaml @@ -1,11 +1,7 @@  --- -- name: restart vrs -  become: yes -  systemd: name=openvswitch state=restarted -  - name: restart node    become: yes -  systemd: name={{ openshift.common.service_type }}-node state=restarted +  systemd: name={{ openshift.common.service_type }}-node daemon-reload=yes state=restarted  - name: save iptable rules    become: yes diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml index 66d6ef4ca..9db9dbb6a 100644 --- a/roles/nuage_node/tasks/main.yaml +++ b/roles/nuage_node/tasks/main.yaml @@ -1,28 +1,18 
@@  --- -- name: Install Nuage VRS -  become: yes -  yum: name={{ vrs_rpm }} state=present - -- name: Set the uplink interface -  become: yes -  lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}' - -- name: Set the Active Controller -  become: yes -  lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}' - -- name: Set the K8S/OSE Cluster service CIDR -  become: yes -  lineinfile: dest={{ vrs_config }} regexp=^K8S_SERVICE_IPV4_SUBNET line='K8S_SERVICE_IPV4_SUBNET={{ k8s_cluster_service_cidr }}' +- name: Set the Nuage plugin openshift directory fact for Atomic hosts +  set_fact: +    vsp_openshift_dir: /var/usr/share/vsp-openshift +  when: openshift.common.is_atomic | bool -- name: Set the Standby Controller -  become: yes -  lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}' -  when: vsc_standby_ip is defined +- name: Set the Nuage CNI binary directory fact for Atomic hosts +  set_fact: +    cni_bin_dir: /var/opt/cni/bin/ +  when: openshift.common.is_atomic | bool -- name: Install plugin rpm -  become: yes -  yum: name={{ plugin_rpm }} state=present +- name: Set the Nuage plugin certs directory fact for Atomic hosts +  set_fact: +    nuage_plugin_crt_dir: /var/usr/share/vsp-openshift +  when: openshift.common.is_atomic | bool  - name: Assure CNI conf dir exists    become: yes @@ -32,13 +22,6 @@    become: yes    file: path="{{ cni_bin_dir }}" state=directory -- name: Install CNI loopback plugin -  become: yes -  copy: -    src: "{{ k8s_cni_loopback_plugin }}" -    dest: "{{ cni_bin_dir }}/{{ k8s_cni_loopback_plugin | basename }}" -    mode: 0755 -  - name: Copy the certificates and keys    become: yes    copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}" @@ -50,12 +33,16 @@  - include: certificates.yml -- name: Set the vsp-openshift.yaml +- name: Add additional Docker mounts for 
Nuage for atomic hosts    become: yes -  template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644 +  lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}" +  when: openshift.common.is_atomic | bool + +- name: Restart node services +  command: /bin/true    notify: -    - restart vrs      - restart node +  ignore_errors: true  - include: iptables.yml diff --git a/roles/nuage_node/templates/vsp-openshift.j2 b/roles/nuage_node/templates/vsp-openshift.j2 deleted file mode 100644 index f6bccebc2..000000000 --- a/roles/nuage_node/templates/vsp-openshift.j2 +++ /dev/null @@ -1,29 +0,0 @@ -clientCert: {{ client_cert }}  -# The key to the certificate in clientCert above -clientKey: {{ client_key }} -# The certificate authority's certificate for the local kubelet.  Usually the -# same as the CA cert used to create the client Cert/Key pair. -CACert: {{ ca_cert }}  -# Name of the enterprise in which pods will reside -enterpriseName: {{ enterprise }}  -# Name of the domain in which pods will reside -domainName: {{ domain }} -# Name of the VSD user in admin group -vsdUser: {{ vsd_user }} -# IP address and port number of master API server -masterApiServer: {{ api_server }} -# REST server URL  -nuageMonRestServer: {{ nuage_mon_rest_server_url }} -# Bridge name for the docker bridge -dockerBridgeName: {{ docker_bridge }} -# Certificate for connecting to the kubemon REST API -nuageMonClientCert: {{ rest_client_cert }} -# Key to the certificate in restClientCert -nuageMonClientKey: {{ rest_client_key }}  -# CA certificate for verifying the master's rest server -nuageMonServerCA: {{ rest_server_ca_cert }} -# Nuage vport mtu size -interfaceMTU: {{ vport_mtu  }} -# Logging level for the plugin -# allowed options are: "dbg", "info", "warn", "err", "emer", "off" -logLevel: {{ plugin_log_level }} diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml index 4cf68411f..d8bfca62a 100644 --- 
a/roles/nuage_node/vars/main.yaml +++ b/roles/nuage_node/vars/main.yaml @@ -23,3 +23,5 @@ cni_conf_dir: "/etc/cni/net.d/"  cni_bin_dir: "/opt/cni/bin/"  nuage_plugin_crt_dir: /usr/share/vsp-openshift +openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node +nuage_atomic_docker_additional_mounts: "DOCKER_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d" diff --git a/roles/openshift_ami_prep/defaults/main.yml b/roles/openshift_ami_prep/defaults/main.yml new file mode 100644 index 000000000..2ba6d8eae --- /dev/null +++ b/roles/openshift_ami_prep/defaults/main.yml @@ -0,0 +1,50 @@ +--- + + +r_openshift_ami_prep_packages: +- atomic-openshift-master +- atomic-openshift-node +- atomic-openshift-docker-excluder +- atomic-openshift-sdn-ovs +- openvswitch +- docker +- etcd +#- pcs +- haproxy +- dnsmasq +- ntp +- logrotate +- httpd-tools +- bind +- firewalld +- libselinux-python +- conntrack-tools +- openssl +- cloud-init +- iproute +- python-dbus +- PyYAML +- yum-utils +- python2-boto +- python2-boto3 +- cloud-utils-growpart +# gluster +- glusterfs-fuse +- heketi-client +# nfs +- nfs-utils +- flannel +- bash-completion +# cockpit +- cockpit-ws +- cockpit-system +- cockpit-bridge +- cockpit-docker +# iscsi +- iscsi-initiator-utils +# ceph +- ceph-common +# systemcontainer +# - runc +# - container-selinux +# - atomic diff --git a/roles/openshift_ami_prep/tasks/main.yml b/roles/openshift_ami_prep/tasks/main.yml new file mode 100644 index 000000000..98f7bc0e2 --- /dev/null +++ b/roles/openshift_ami_prep/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- name: install repositories +  include: yum_repos.yml +  static: yes + +- name: install needed rpm(s) +  package: +    name: "{{ item }}" +    state: present +  with_items: "{{ r_openshift_ami_prep_packages }}" + +- name: create the directory for node +  file: +    
state: directory +    path: "/etc/systemd/system/{{ r_openshift_ami_prep_node }}.service.d" + +- name: laydown systemd override +  copy: +    dest: "/etc/systemd/system/{{ r_openshift_ami_prep_node }}.service.d/override.conf" +    content: | +      [Unit] +      After=cloud-init.service + +- name: update the sysconfig to have KUBECONFIG +  lineinfile: +    dest: "/etc/sysconfig/{{ r_openshift_ami_prep_node }}" +    line: "KUBECONFIG=/root/csr_kubeconfig" +    regexp: "^KUBECONFIG=.*" + +- name: update the ExecStart to have bootstrap +  lineinfile: +    dest: "/usr/lib/systemd/system/{{ r_openshift_ami_prep_node }}.service" +    line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}" +    regexp: "^ExecStart=.*" + +- name: systemctl enable origin-node +  systemd: +    name: "{{ item }}" +    enabled: no +  with_items: +  - "{{ r_openshift_ami_prep_node }}.service" +  - "{{ r_openshift_ami_prep_master }}.service" diff --git a/roles/openshift_ami_prep/tasks/yum_repos.yml b/roles/openshift_ami_prep/tasks/yum_repos.yml new file mode 100644 index 000000000..c48c67ac2 --- /dev/null +++ b/roles/openshift_ami_prep/tasks/yum_repos.yml @@ -0,0 +1,14 @@ +--- +- name: Create our install repository +  yum_repository: +    description: "{{ item.description | default(omit) }}" +    name: "{{ item.name }}" +    baseurl: "{{ item.baseurl }}" +    gpgkey: "{{ item.gpgkey | default(omit)}}" +    gpgcheck: "{{ item.gpgcheck | default(1) }}" +    sslverify: "{{ item.sslverify | default(1) }}" +    sslclientkey: "{{ item.sslclientkey | default(omit) }}" +    sslclientcert: "{{ item.sslclientcert | default(omit) }}" +    file: "{{ item.file }}" +    enabled: "{{ item.enabled }}" +  with_items: "{{ r_openshift_ami_prep_yum_repositories }}" diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md new file mode 100644 index 000000000..111818451 --- /dev/null +++ 
b/roles/openshift_aws_ami_copy/README.md @@ -0,0 +1,50 @@ +openshift_aws_ami_perms +========= + +Ansible role for copying an AMI + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- openshift_aws_ami_copy_src_ami: source AMI id to copy from +- openshift_aws_ami_copy_region: region where the AMI is found +- openshift_aws_ami_copy_name: name to assign to new AMI +- openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption +- openshift_aws_ami_copy_tags: dict with desired tags +- openshift_aws_ami_copy_wait: wait for the ami copy to achieve available status.  This fails due to boto waiters. + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +    - name: copy the ami for encrypted disks +      include_role: +        name: openshift_aws_ami_copy +      vars: +        r_openshift_aws_ami_copy_region: us-east-1 +        r_openshift_aws_ami_copy_name: myami +        r_openshift_aws_ami_copy_src_ami: ami-1234 +        r_openshift_aws_ami_copy_kms_arn: arn:xxxx +        r_openshift_aws_ami_copy_tags: {} +        r_openshift_aws_ami_copy_encrypt: False + +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml new file mode 100644 index 000000000..bcccd4042 --- /dev/null +++ b/roles/openshift_aws_ami_copy/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- fail: +    msg: "{{ item }} needs to be defined" +  when: item is not defined +  with_items: +  - r_openshift_aws_ami_copy_src_ami +  - r_openshift_aws_ami_copy_name +  - r_openshift_aws_ami_copy_region + +- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}" +  ec2_ami_copy: +    region: "{{ r_openshift_aws_ami_copy_region }}" +    source_region: "{{ r_openshift_aws_ami_copy_region }}" +    name: "{{ r_openshift_aws_ami_copy_name }}" +    source_image_id: "{{ 
r_openshift_aws_ami_copy_src_ami }}" +    encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}" +    kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}" +    wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}" +    tags: "{{ r_openshift_aws_ami_copy_tags }}" +  register: copy_result + +- debug: var=copy_result + +- name: return AMI ID with setfact - openshift_aws_ami_copy_retval_custom_ami +  set_fact: +    r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}" diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md new file mode 100644 index 000000000..ecc45fa14 --- /dev/null +++ b/roles/openshift_aws_elb/README.md @@ -0,0 +1,75 @@ +openshift_aws_elb +========= + +Ansible role to provision and manage AWS ELB's for Openshift. + +Requirements +------------ + +Ansible Modules: + +- ec2_elb +- ec2_elb_lb + +python package: + +python-boto + +Role Variables +-------------- + +- r_openshift_aws_elb_instances: instances to put in ELB +- r_openshift_aws_elb_elb_name: name of elb +- r_openshift_aws_elb_security_group_names: list of SGs (by name) that the ELB will belong to +- r_openshift_aws_elb_region: AWS Region +- r_openshift_aws_elb_health_check: definition of the ELB health check. See ansible docs for ec2_elb +```yaml +  ping_protocol: tcp +  ping_port: 443 +  response_timeout: 5 +  interval: 30 +  unhealthy_threshold: 2 +  healthy_threshold: 2 +``` +- r_openshift_aws_elb_listeners: definition of the ELB listeners. 
See ansible docs for ec2_elb +```yaml +- protocol: tcp +  load_balancer_port: 80 +  instance_protocol: ssl +  instance_port: 443 +- protocol: ssl +  load_balancer_port: 443 +  instance_protocol: ssl +  instance_port: 443 +  # ssl certificate required for https or ssl +  ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}" +``` + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +- include_role: +    name: openshift_aws_elb +  vars: +    r_openshift_aws_elb_instances: aws_instances_to_put_in_elb +    r_openshift_aws_elb_elb_name: elb_name +    r_openshift_aws_elb_security_groups: security_group_names +    r_openshift_aws_elb_region: aws_region +    r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}" +    r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}" +``` + + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml new file mode 100644 index 000000000..ed5d38079 --- /dev/null +++ b/roles/openshift_aws_elb/defaults/main.yml @@ -0,0 +1,33 @@ +--- +r_openshift_aws_elb_health_check: +  ping_protocol: tcp +  ping_port: 443 +  response_timeout: 5 +  interval: 30 +  unhealthy_threshold: 2 +  healthy_threshold: 2 + +r_openshift_aws_elb_cert_arn: '' + +r_openshift_aws_elb_listeners: +  master: +    external: +    - protocol: tcp +      load_balancer_port: 80 +      instance_protocol: ssl +      instance_port: 443 +    - protocol: ssl +      load_balancer_port: 443 +      instance_protocol: ssl +      instance_port: 443 +      # ssl certificate required for https or ssl +      ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}" +    internal: +    - protocol: tcp +      load_balancer_port: 80 +      instance_protocol: tcp +      instance_port: 80 +    - protocol: tcp +      load_balancer_port: 443 +      instance_protocol: tcp +      instance_port: 443 diff --git 
a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml new file mode 100644 index 000000000..58be652a5 --- /dev/null +++ b/roles/openshift_aws_elb/meta/main.yml @@ -0,0 +1,12 @@ +--- +galaxy_info: +  author: OpenShift +  description: Openshift ELB provisioning +  company: Red Hat, Inc +  license: ASL 2.0 +  min_ansible_version: 1.2 +  platforms: +  - name: EL +    versions: +    - 7 +dependencies: [] diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml new file mode 100644 index 000000000..64ec18545 --- /dev/null +++ b/roles/openshift_aws_elb/tasks/main.yml @@ -0,0 +1,57 @@ +--- +- name: fetch the default subnet id +  ec2_remote_facts: +    region: "{{ r_openshift_aws_elb_region }}" +    filters: "{{ r_openshift_aws_elb_instance_filter }}" +  register: instancesout + +- name: fetch the default subnet id +  ec2_vpc_subnet_facts: +    region: "{{ r_openshift_aws_elb_region }}" +    filters: +      "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}" +  register: subnetout + +- name: +  debug: +    msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction] +                   if 'master' in r_openshift_aws_elb_type  or 'infra' in r_openshift_aws_elb_type +                   else r_openshift_aws_elb_listeners }}" + +- name: "Create ELB {{ r_openshift_aws_elb_name }}" +  ec2_elb_lb: +    name: "{{ r_openshift_aws_elb_name }}" +    state: present +    security_group_names: "{{ r_openshift_aws_elb_security_groups }}" +    idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}" +    region: "{{ r_openshift_aws_elb_region }}" +    subnets: +    - "{{ subnetout.subnets[0].id }}" +    health_check: "{{ r_openshift_aws_elb_health_check }}" +    listeners: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction] +                   if 'master' in r_openshift_aws_elb_type  or 'infra' in r_openshift_aws_elb_type +                   else 
r_openshift_aws_elb_listeners }}" +    scheme: "{{ r_openshift_aws_elb_scheme }}" +    tags: +      KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}" +  register: new_elb + +# It is necessary to ignore_errors here because the instances are not in 'ready' +#  state when first added to ELB +- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}" +  ec2_elb: +    instance_id: "{{ item.id }}" +    ec2_elbs: "{{ r_openshift_aws_elb_name }}" +    state: present +    region: "{{ r_openshift_aws_elb_region }}" +    wait: False +  with_items: "{{ instancesout.instances }}" +  ignore_errors: True +  retries: 10 +  register: elb_call +  until: elb_call|succeeded + +- debug: +    msg: "{{ item }}" +  with_items: +  - "{{ new_elb }}" diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md new file mode 100644 index 000000000..9468e785c --- /dev/null +++ b/roles/openshift_aws_iam_kms/README.md @@ -0,0 +1,43 @@ +openshift_aws_iam_kms +========= + +Ansible role to create AWS IAM KMS keys for encryption + +Requirements +------------ + +Ansible Modules: + +oo_iam_kms + +Role Variables +-------------- + +- r_openshift_aws_iam_kms_region: AWS region to create KMS key +- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key + +Dependencies +------------ + +lib_utils + +Example Playbook +---------------- +```yaml +- include_role: +    name: openshift_aws_iam_kms +  vars: +    r_openshift_aws_iam_kms_region: 'us-east-1' +    r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms' +``` + + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/lib_utils/tasks/main.yml b/roles/openshift_aws_iam_kms/defaults/main.yml index ed97d539c..ed97d539c 100644 --- a/roles/lib_utils/tasks/main.yml +++ b/roles/openshift_aws_iam_kms/defaults/main.yml diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_aws_iam_kms/meta/main.yml new file mode 100644 index 
000000000..e29aaf96b --- /dev/null +++ b/roles/openshift_aws_iam_kms/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: +  author: OpenShift +  description: AWS IAM KMS setup and management +  company: Red Hat, Inc +  license: ASL 2.0 +  min_ansible_version: 1.2 +  platforms: +  - name: EL +    versions: +    - 7 +dependencies: +- lib_utils diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml new file mode 100644 index 000000000..32aac2666 --- /dev/null +++ b/roles/openshift_aws_iam_kms/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- fail: +    msg: "{{ item.name }} needs to be defined." +  when: item.cond | bool +  with_items: +  - name: "{{ r_openshift_aws_iam_kms_alias }}" +    cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}" +  - name: "{{ r_openshift_aws_iam_kms_region }}" +    cond: "{{ r_openshift_aws_iam_kms_region is undefined }}" + +- name: Create IAM KMS key with alias +  oo_iam_kms: +    state: present +    alias: "{{ r_openshift_aws_iam_kms_alias }}" +    region: "{{ r_openshift_aws_iam_kms_region }}" +  register: created_kms + +- debug: var=created_kms.results diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md new file mode 100644 index 000000000..52b7e83b6 --- /dev/null +++ b/roles/openshift_aws_launch_config/README.md @@ -0,0 +1,72 @@ +openshift_aws_launch_config +========= + +Ansible role to create an AWS launch config for a scale group. + +This includes the AMI, volumes, user_data, etc. 
+ +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- +- r_openshift_aws_launch_config_name: "{{ launch_config_name }}" +- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}" +- r_openshift_aws_launch_config_region: "{{ region }}" +- r_openshift_aws_launch_config: "{{ node_group_config }}" +```yaml +    master: +      instance_type: m4.xlarge +      ami: ami-cdeec8b6  # if using an encrypted AMI this will be replaced +      volumes: +      - device_name: /dev/sdb +        volume_size: 100 +        device_type: gp2 +        delete_on_termination: False +      health_check: +        period: 60 +        type: EC2 +      min_size: 3 +      max_size: 3 +      desired_size: 3 +      tags: +        host-type: master +        sub-host-type: default +      wait_for_instances: True +``` +- r_openshift_aws_launch_config_type: compute +- r_openshift_aws_launch_config_custom_image: ami-xxxxx +- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig> + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +  - name: create compute nodes config +    include_role: +      name: openshift_aws_launch_config +    vars: +      r_openshift_aws_launch_config_name: "{{ launch_config_name }}" +      r_openshift_aws_launch_config_clusterid: "{{ clusterid }}" +      r_openshift_aws_launch_config_region: "{{ region }}" +      r_openshift_aws_launch_config: "{{ node_group_config }}" +      r_openshift_aws_launch_config_type: compute +      r_openshift_aws_launch_config_custom_image: ami-1234 +      r_openshift_aws_launch_config_bootstrap_token: abcd +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/openshift_aws_launch_config/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git 
a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml new file mode 100644 index 000000000..e61670cc2 --- /dev/null +++ b/roles/openshift_aws_launch_config/meta/main.yml @@ -0,0 +1,12 @@ +--- +galaxy_info: +  author: OpenShift +  description: Openshift AWS VPC creation +  company: Red Hat, Inc +  license: ASL 2.0 +  min_ansible_version: 2.3 +  platforms: +  - name: EL +    versions: +    - 7 +dependencies: [] diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml new file mode 100644 index 000000000..437cf1f71 --- /dev/null +++ b/roles/openshift_aws_launch_config/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: fail when params are not set +  fail: +    msg: Please specify the role parameters. +  when: +  - r_openshift_aws_launch_config_cluseterid is undefined +  - r_openshift_aws_launch_config_type is undefined +  - r_openshift_aws_launch_config_region is undefined +  - r_openshift_aws_launch_config is undefined + +- name: fetch the security groups for launch config +  ec2_group_facts: +    filters: +      group-name: +      - "{{ r_openshift_aws_launch_config_clusterid }}"  # default sg +      - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}"  # node type sg +      - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s"  # node type sg k8s +    region: "{{ r_openshift_aws_launch_config_region }}" +  register: ec2sgs + +# Create the scale group config +- name: Create the node scale group config +  ec2_lc: +    name: "{{ r_openshift_aws_launch_config_name }}" +    region: "{{ r_openshift_aws_launch_config_region }}" +    image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}" +    instance_type: "{{ 
r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}" +    security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}" +    user_data: |- +      #cloud-config +      {%  if r_openshift_aws_launch_config_type != 'master' %} +      write_files: +      - path: /root/csr_kubeconfig +        owner: root:root +        permissions: '0640' +        content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }} +      - path: /root/openshift_settings +        owner: root:root +        permissions: '0640' +        content: +          openshift_type: "{{ r_openshift_aws_launch_config_type }}" +      runcmd: +      - [ systemctl, enable, atomic-openshift-node] +      - [ systemctl, start, atomic-openshift-node] +      {% endif %} +    key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}" +    ebs_optimized: False +    volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}" +    assign_public_ip: True +  register: test diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2 new file mode 100644 index 000000000..1a1e29550 --- /dev/null +++ b/roles/openshift_aws_launch_config/templates/cloud-init.j2 @@ -0,0 +1,9 @@ +{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token is not '' %} +#cloud-config +write_files: +- path: /root/csr_kubeconfig +  owner: root:root +  permissions: '0640' +  content: |- +  {{ r_openshift_aws_launch_config_bootstrap_token }} +{% endif %} diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md new file mode 100644 index 000000000..c32c57bc5 --- /dev/null +++ b/roles/openshift_aws_node_group/README.md @@ -0,0 +1,77 @@ +openshift_aws_node_group +========= + +Ansible role to create an aws node group. + +This includes the security group, launch config, and scale group. 
+ +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- +```yaml +- r_openshift_aws_node_group_name: myscalegroup +- r_openshift_aws_node_group_clusterid: myclusterid +- r_openshift_aws_node_group_region: us-east-1 +- r_openshift_aws_node_group_lc_name: launch_config +- r_openshift_aws_node_group_type: master|infra|compute +- r_openshift_aws_node_group_config: "{{ node_group_config }}" +```yaml +master: +  instance_type: m4.xlarge +  ami: ami-cdeec8b6  # if using an encrypted AMI this will be replaced +  volumes: +  - device_name: /dev/sdb +    volume_size: 100 +    device_type: gp2 +    delete_on_termination: False +  health_check: +    period: 60 +    type: EC2 +  min_size: 3 +  max_size: 3 +  desired_size: 3 +  tags: +    host-type: master +    sub-host-type: default +  wait_for_instances: True +``` +- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}" + +```yaml +us-east-1a  # name of subnet +``` + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +  - name: "create {{ openshift_build_node_type }} node groups" +    include_role: +      name: openshift_aws_node_group +    vars: +      r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute" +      r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}" +      r_openshift_aws_node_group_clusterid: "{{ clusterid }}" +      r_openshift_aws_node_group_region: "{{ region }}" +      r_openshift_aws_node_group_config: "{{ node_group_config }}" +      r_openshift_aws_node_group_type: compute +      r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}" +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml new file mode 100644 index 000000000..44c5116a1 --- /dev/null +++ b/roles/openshift_aws_node_group/defaults/main.yml @@ -0,0 +1,58 @@ +--- +r_openshift_aws_node_group_type: master 
+ +r_openshift_aws_node_group_config: +  tags: +    clusterid: "{{ r_openshift_aws_node_group_clusterid }}" +  master: +    instance_type: m4.xlarge +    ami: "{{ r_openshift_aws_node_group_ami }}" +    volumes: +    - device_name: /dev/sdb +      volume_size: 100 +      device_type: gp2 +      delete_on_termination: False +    health_check: +      period: 60 +      type: EC2 +    min_size: 3 +    max_size: 3 +    desired_size: 3 +    tags: +      host-type: master +      sub-host-type: default +    wait_for_instances: True +  compute: +    instance_type: m4.xlarge +    ami: "{{ r_openshift_aws_node_group_ami }}" +    volumes: +    - device_name: /dev/sdb +      volume_size: 100 +      device_type: gp2 +      delete_on_termination: True +    health_check: +      period: 60 +      type: EC2 +    min_size: 3 +    max_size: 100 +    desired_size: 3 +    tags: +      host-type: node +      sub-host-type: compute +  infra: +    instance_type: m4.xlarge +    ami: "{{ r_openshift_aws_node_group_ami }}" +    volumes: +    - device_name: /dev/sdb +      volume_size: 100 +      device_type: gp2 +      delete_on_termination: True +    health_check: +      period: 60 +      type: EC2 +    min_size: 2 +    max_size: 20 +    desired_size: 2 +    tags: +      host-type: node +      sub-host-type: infra diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml new file mode 100644 index 000000000..6f5364b03 --- /dev/null +++ b/roles/openshift_aws_node_group/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: validate role inputs +  fail: +    msg: Please pass in the required role variables +  when: +  - r_openshift_aws_node_group_clusterid is not defined +  - r_openshift_aws_node_group_region is not defined +  - r_openshift_aws_node_group_subnet_name is not defined + +- name: fetch the subnet to use in scale group +  ec2_vpc_subnet_facts: +    region: "{{ r_openshift_aws_node_group_region }}" +    filters: +      "tag:Name": "{{ 
r_openshift_aws_node_group_subnet_name }}" +  register: subnetout + +- name: Create the scale group +  ec2_asg: +    name: "{{ r_openshift_aws_node_group_name }}" +    launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}" +    health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}" +    health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}" +    min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}" +    max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}" +    desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}" +    region: "{{ r_openshift_aws_node_group_region }}" +    termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in  r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}" +    load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}" +    wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}" +    vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" +    tags: +    - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}" diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md new file mode 100644 index 000000000..afafe61cf --- /dev/null +++ b/roles/openshift_aws_s3/README.md @@ -0,0 +1,43 @@ +openshift_aws_s3 +========= + +Ansible role to create an s3 bucket + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- r_openshift_aws_s3_clusterid: myclusterid +- 
r_openshift_aws_s3_region: us-east-1 +- r_openshift_aws_s3_mode:  create|delete + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +- name: create an s3 bucket +  include_role: +    name: openshift_aws_s3 +  vars: +    r_openshift_aws_s3_clusterid: mycluster +    r_openshift_aws_s3_region: us-east-1 +    r_openshift_aws_s3_mode: create +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml new file mode 100644 index 000000000..46bd781bd --- /dev/null +++ b/roles/openshift_aws_s3/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: Create an s3 bucket +  s3: +    bucket: "{{ r_openshift_aws_s3_clusterid }}" +    mode: "{{ r_openshift_aws_s3_mode }}" +    region: "{{ r_openshift_aws_s3_region }}" diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md new file mode 100644 index 000000000..eeb76bbb6 --- /dev/null +++ b/roles/openshift_aws_sg/README.md @@ -0,0 +1,59 @@ +openshift_aws_sg +========= + +Ansible role to create AWS security groups + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- r_openshift_aws_sg_clusterid: myclusterid +- r_openshift_aws_sg_region: us-east-1 +- r_openshift_aws_sg_type: master|infra|compute +```yaml +# defaults/main.yml +  default: +    name: "{{ r_openshift_aws_sg_clusterid }}" +    desc: "{{ r_openshift_aws_sg_clusterid }} default" +    rules: +    - proto: tcp +      from_port: 22 +      to_port: 22 +      cidr_ip: 0.0.0.0/0 +    - proto: all +      from_port: all +      to_port: all +      group_name: "{{ r_openshift_aws_sg_clusterid }}" +``` + + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +- name: create security groups for master +  include_role: +    name: openshift_aws_sg +  vars: +    r_openshift_aws_sg_clusterid: mycluster +    r_openshift_aws_sg_region: us-east-1 +    
r_openshift_aws_sg_type: master +``` + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml new file mode 100644 index 000000000..9c480d337 --- /dev/null +++ b/roles/openshift_aws_sg/defaults/main.yml @@ -0,0 +1,48 @@ +--- +r_openshift_aws_sg_sg: +  default: +    name: "{{ r_openshift_aws_sg_clusterid }}" +    desc: "{{ r_openshift_aws_sg_clusterid }} default" +    rules: +    - proto: tcp +      from_port: 22 +      to_port: 22 +      cidr_ip: 0.0.0.0/0 +    - proto: all +      from_port: all +      to_port: all +      group_name: "{{ r_openshift_aws_sg_clusterid }}" +  master: +    name: "{{ r_openshift_aws_sg_clusterid }}_master" +    desc: "{{ r_openshift_aws_sg_clusterid }} master instances" +    rules: +    - proto: tcp +      from_port: 80 +      to_port: 80 +      cidr_ip: 0.0.0.0/0 +    - proto: tcp +      from_port: 443 +      to_port: 443 +      cidr_ip: 0.0.0.0/0 +  compute: +    name: "{{ r_openshift_aws_sg_clusterid }}_compute" +    desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances" +  infra: +    name: "{{ r_openshift_aws_sg_clusterid }}_infra" +    desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances" +    rules: +    - proto: tcp +      from_port: 80 +      to_port: 80 +      cidr_ip: 0.0.0.0/0 +    - proto: tcp +      from_port: 443 +      to_port: 443 +      cidr_ip: 0.0.0.0/0 +    - proto: tcp +      from_port: 30000 +      to_port: 32000 +      cidr_ip: 0.0.0.0/0 +  etcd: +    name: "{{ r_openshift_aws_sg_clusterid }}_etcd" +    desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances" diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml new file mode 100644 index 000000000..2294fdcc9 --- /dev/null +++ b/roles/openshift_aws_sg/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Validate role inputs +  fail: +    msg: Please ensure to pass the correct variables 
+  when: +  - r_openshift_aws_sg_region is undefined +  - r_openshift_aws_sg_region is undefined + + +- name: Fetch the VPC for vpc.id +  ec2_vpc_net_facts: +    region: "{{ r_openshift_aws_sg_region }}" +    filters: +      "tag:Name": "{{ r_openshift_aws_sg_clusterid }}" +  register: vpcout + +- name: Create default security group for cluster +  ec2_group: +    name: "{{ r_openshift_aws_sg_sg.default.name }}" +    description: "{{ r_openshift_aws_sg_sg.default.desc }}" +    region: "{{ r_openshift_aws_sg_region }}" +    vpc_id: "{{ vpcout.vpcs[0].id }}" +    rules: "{{ r_openshift_aws_sg_sg.default.rules | default(omit, True)}}" +  register: sg_default_created + +- name: create the node group sgs +  ec2_group: +    name: "{{ item.name}}" +    description: "{{ item.desc }}" +    rules: "{{ item.rules if 'rules' in item else [] }}" +    region: "{{ r_openshift_aws_sg_region }}" +    vpc_id: "{{ vpcout.vpcs[0].id }}" +  register: sg_create +  with_items: +  - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}" + +- name: create the k8s sgs for the node group +  ec2_group: +    name: "{{ item.name }}_k8s" +    description: "{{ item.desc }} for k8s" +    region: "{{ r_openshift_aws_sg_region }}" +    vpc_id: "{{ vpcout.vpcs[0].id }}" +  register: k8s_sg_create +  with_items: +  - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type] }}" + +- name: tag sg groups with proper tags +  ec2_tag: +    tags: +      KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}" +    resource: "{{ item.group_id }}" +    region: "{{ r_openshift_aws_sg_region }}" +  with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md new file mode 100644 index 000000000..4f8667918 --- /dev/null +++ b/roles/openshift_aws_ssh_keys/README.md @@ -0,0 +1,49 @@ +openshift_aws_ssh_keys +========= + +Ansible role for syncing SSH keys + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- 
r_openshift_aws_ssh_keys_users: list of dicts of users +- r_openshift_aws_ssh_keys_region: ec2_region to install the keys + +Dependencies +------------ + + +Example Playbook +---------------- +```yaml +users: +- username: user1 +  pub_key: <user1 ssh public key> +- username: user2 +  pub_key: <user2 ssh public key> + +region: us-east-1 + +- include_role: +    name: openshift_aws_ssh_keys +  vars: +    r_openshift_aws_ssh_keys_users: "{{ users }}" +    r_openshift_aws_ssh_keys_region: "{{ region }}" +``` + + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws_ssh_keys/tasks/main.yml new file mode 100644 index 000000000..232cf20ed --- /dev/null +++ b/roles/openshift_aws_ssh_keys/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: Add the public keys for the users +  ec2_key: +    name: "{{ item.key_name }}" +    key_material: "{{ item.pub_key }}" +    region: "{{ r_openshift_aws_ssh_keys_region }}" +  with_items: "{{ r_openshift_aws_ssh_keys_users }}" +  no_log: True diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md new file mode 100644 index 000000000..d88cf0581 --- /dev/null +++ b/roles/openshift_aws_vpc/README.md @@ -0,0 +1,62 @@ +openshift_aws_vpc +========= + +Ansible role to create a default AWS VPC + +Requirements +------------ + +Ansible Modules: + + +Role Variables +-------------- + +- r_openshift_aws_vpc_clusterid: "{{ clusterid }}" +- r_openshift_aws_vpc_cidr: 172.31.48.0/20 +- r_openshift_aws_vpc_subnets: "{{ subnets }}" +```yaml +    subnets: +      us-east-1:  # These are us-east-1 region defaults. 
Ensure this matches your region +      - cidr: 172.31.48.0/20 +        az: "us-east-1c" +      - cidr: 172.31.32.0/20 +        az: "us-east-1e" +      - cidr: 172.31.16.0/20 +        az: "us-east-1a" +``` +- r_openshift_aws_vpc_region: "{{ region }}" +- r_openshift_aws_vpc_tags: dict of tags to apply to vpc +- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}" + +Dependencies +------------ + + +Example Playbook +---------------- + +```yaml +  - name: create default vpc +    include_role: +      name: openshift_aws_vpc +    vars: +      r_openshift_aws_vpc_clusterid: mycluster +      r_openshift_aws_vpc_cidr: 172.31.48.0/20 +      r_openshift_aws_vpc_subnets: "{{ subnets }}" +      r_openshift_aws_vpc_region: us-east-1 +      r_openshift_aws_vpc_tags: {} +      r_openshift_aws_vpc_name: mycluster + +``` + + +License +------- + +Apache 2.0 + +Author Information +------------------ + +Openshift diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/openshift_aws_vpc/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws_vpc/tasks/main.yml new file mode 100644 index 000000000..cfe08dae5 --- /dev/null +++ b/roles/openshift_aws_vpc/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Create AWS VPC +  ec2_vpc_net: +    state: present +    cidr_block: "{{ r_openshift_aws_vpc_cidr }}" +    dns_support: True +    dns_hostnames: True +    region: "{{ r_openshift_aws_vpc_region }}" +    name: "{{ r_openshift_aws_vpc_clusterid }}" +    tags: +      Name: "{{ r_openshift_aws_vpc_clusterid }}" +  register: vpc + +- name: Sleep to avoid a race condition when creating the vpc +  pause: +    seconds: 5 +  when: vpc.changed + +- name: assign the vpc igw +  ec2_vpc_igw: +    region: "{{ r_openshift_aws_vpc_region }}" +    vpc_id: "{{ vpc.vpc.id }}" +  register: igw + +- name: assign the vpc subnets +  
ec2_vpc_subnet: +    region: "{{ r_openshift_aws_vpc_region }}" +    vpc_id: "{{ vpc.vpc.id }}" +    cidr: "{{ item.cidr }}" +    az: "{{ item.az }}" +    resource_tags: +      Name: "{{ item.az }}" +  with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}" + +- name: Grab the route tables from our VPC +  ec2_vpc_route_table_facts: +    region: "{{ r_openshift_aws_vpc_region }}" +    filters: +      vpc-id: "{{ vpc.vpc.id }}" +  register: route_table + +- name: update the route table in the vpc +  ec2_vpc_route_table: +    lookup: id +    route_table_id: "{{ route_table.route_tables[0].id }}" +    vpc_id: "{{ vpc.vpc.id }}" +    region: "{{ r_openshift_aws_vpc_region }}" +    tags: +      Name: "{{ r_openshift_aws_vpc_name }}" +    routes: +    - dest: 0.0.0.0/0 +      gateway_id: igw +  register: route_table_out diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml index c1de367d9..04a1ce873 100644 --- a/roles/openshift_cli/meta/main.yml +++ b/roles/openshift_cli/meta/main.yml @@ -15,4 +15,4 @@ dependencies:  - role: openshift_docker    when: not skip_docker_role | default(False) | bool  - role: openshift_common -- role: openshift_cli_facts +- role: openshift_facts diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index c716a0860..9e61805f9 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -1,6 +1,6 @@  ---  - set_fact: -    l_use_crio: "{{ openshift_docker_use_crio | default(false) }}" +    l_use_crio: "{{ openshift_use_crio | default(false) }}"  - name: Install clients    package: name={{ openshift.common.service_type }}-clients state=present diff --git a/roles/openshift_cli_facts/meta/main.yml b/roles/openshift_cli_facts/meta/main.yml deleted file mode 100644 index 59acde215..000000000 --- a/roles/openshift_cli_facts/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: -  author: Jason DeTiberus -  description: OpenShift CLI 
Facts -  company: Red Hat, Inc. -  license: Apache License, Version 2.0 -  min_ansible_version: 1.9 -  platforms: -  - name: EL -    versions: -    - 7 -  categories: -  - cloud -dependencies: -- role: openshift_facts diff --git a/roles/openshift_cli_facts/tasks/main.yml b/roles/openshift_cli_facts/tasks/main.yml deleted file mode 100644 index dd1ed8965..000000000 --- a/roles/openshift_cli_facts/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# TODO: move this to a new 'cli' role -- openshift_facts: -    role: common -    local_facts: -      cli_image: "{{ osm_image | default(None) }}" diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 51313a258..a0bd6c860 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -40,8 +40,8 @@    when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool  - fail: -    msg: openshift_hostname must be 64 characters or less -  when: openshift_hostname is defined and openshift_hostname | length > 64 +    msg: openshift_hostname must be 63 characters or less +  when: openshift_hostname is defined and openshift_hostname | length > 63  - name: Set common Cluster facts    openshift_facts: diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index 516d7dc29..334150f63 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -17,7 +17,7 @@        hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"        hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"        use_system_container: "{{ openshift_docker_use_system_container | default(False) }}" -      use_crio: "{{ openshift_docker_use_crio | default(False) }}" +      use_crio: "{{ openshift_use_crio | default(False) }}"    - role: 
node      local_facts:        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index 3a8ce55c4..ca3f219d8 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -40,5 +40,6 @@ popd  wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json         -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json  wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-example.json           -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-example.json  wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-pgsql-persistent.json    -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-pgsql-persistent.json +wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-runtime-example.json    -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-runtime-example.json  git diff files/examples diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml index 3bc6c5813..fd57a864c 100644 --- a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml @@ -10,6 +10,12 @@ metadata:      iconClass: "icon-rails"  objects:  - apiVersion: v1 +  kind: Secret +  metadata: +    name: "${NAME}-secrets" +  stringData: +    pg-password: "${DATABASE_PASSWORD}" +- apiVersion: v1    kind: Service    metadata:      annotations: @@ -148,7 +154,10 @@ objects:                value: "${DATABASE_USER}"              -                name: "POSTGRESQL_PASSWORD" -              value: "${DATABASE_PASSWORD}" +              valueFrom: +                secretKeyRef: +                  name: 
"${NAME}-secrets" +                  key: "pg-password"              -                name: "POSTGRESQL_DATABASE"                value: "${DATABASE_NAME}" @@ -345,7 +354,10 @@ objects:                  value: "${DATABASE_USER}"                -                  name: "POSTGRESQL_PASSWORD" -                value: "${DATABASE_PASSWORD}" +                valueFrom: +                  secretKeyRef: +                    name: "${NAME}-secrets" +                    key: "pg-password"                -                  name: "POSTGRESQL_DATABASE"                  value: "${DATABASE_NAME}" @@ -386,7 +398,8 @@ parameters:      displayName: "PostgreSQL Password"      required: true      description: "Password for the PostgreSQL user." -    value: "smartvm" +    from: "[a-zA-Z0-9]{8}" +    generate: expression    -      name: "DATABASE_NAME"      required: true diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml index 3bc6c5813..fd57a864c 100644 --- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml @@ -10,6 +10,12 @@ metadata:      iconClass: "icon-rails"  objects:  - apiVersion: v1 +  kind: Secret +  metadata: +    name: "${NAME}-secrets" +  stringData: +    pg-password: "${DATABASE_PASSWORD}" +- apiVersion: v1    kind: Service    metadata:      annotations: @@ -148,7 +154,10 @@ objects:                value: "${DATABASE_USER}"              -                name: "POSTGRESQL_PASSWORD" -              value: "${DATABASE_PASSWORD}" +              valueFrom: +                secretKeyRef: +                  name: "${NAME}-secrets" +                  key: "pg-password"              -                name: "POSTGRESQL_DATABASE"                value: "${DATABASE_NAME}" @@ -345,7 +354,10 @@ objects:                  value: "${DATABASE_USER}"            
    -                  name: "POSTGRESQL_PASSWORD" -                value: "${DATABASE_PASSWORD}" +                valueFrom: +                  secretKeyRef: +                    name: "${NAME}-secrets" +                    key: "pg-password"                -                  name: "POSTGRESQL_DATABASE"                  value: "${DATABASE_NAME}" @@ -386,7 +398,8 @@ parameters:      displayName: "PostgreSQL Password"      required: true      description: "Password for the PostgreSQL user." -    value: "smartvm" +    from: "[a-zA-Z0-9]{8}" +    generate: expression    -      name: "DATABASE_NAME"      required: true diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/OWNERS b/roles/openshift_examples/files/examples/v3.6/db-templates/OWNERS new file mode 100644 index 000000000..cbdc20f41 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/OWNERS @@ -0,0 +1,12 @@ +reviewers: +  - bparees +  - gabemontero +  - mfojtik +  - dinhxuanvu +  - jim-minter +  - spadgett +approvers: +  - bparees +  - mfojtik +  - spadgett +  - jupierce diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json index 536f7275e..6500ed0d3 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json @@ -27,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-root_password": "{.data['database-root-password']}" +          "template.openshift.io/expose-root_password": "{.data['database-root-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }   
     },        "stringData" : {          "database-user" : "${MYSQL_USER}",          "database-password" : "${MYSQL_PASSWORD}", -        "database-root-password" : "${MYSQL_ROOT_PASSWORD}" +        "database-root-password" : "${MYSQL_ROOT_PASSWORD}", +        "database-name" : "${MYSQL_DATABASE}"        }      },      { @@ -61,7 +63,10 @@        "kind": "DeploymentConfig",        "apiVersion": "v1",        "metadata": { -        "name": "${DATABASE_SERVICE_NAME}" +        "name": "${DATABASE_SERVICE_NAME}", +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -151,7 +156,12 @@                    },                    {                      "name": "MYSQL_DATABASE", -                    "value": "${MYSQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json index 3b7fdccce..4378fa4a0 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json @@ -27,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-root_password": "{.data['database-root-password']}" +          "template.openshift.io/expose-root_password": "{.data['database-root-password']}", +          
"template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MYSQL_USER}",          "database-password" : "${MYSQL_PASSWORD}", -        "database-root-password" : "${MYSQL_ROOT_PASSWORD}" +        "database-root-password" : "${MYSQL_ROOT_PASSWORD}", +        "database-name" : "${MYSQL_DATABASE}"        }      },      { @@ -78,7 +80,10 @@        "kind": "DeploymentConfig",        "apiVersion": "v1",        "metadata": { -        "name": "${DATABASE_SERVICE_NAME}" +        "name": "${DATABASE_SERVICE_NAME}", +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -168,7 +173,12 @@                    },                    {                      "name": "MYSQL_DATABASE", -                    "value": "${MYSQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json index ee274194f..7271a2c69 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "mongodb-ephemeral", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "MongoDB (Ephemeral)",        "description": "MongoDB database service, without persistent storage. 
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", @@ -28,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}" +          "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MONGODB_USER}",          "database-password" : "${MONGODB_PASSWORD}", -        "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}" +        "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}", +        "database-name" : "${MONGODB_DATABASE}"        }      },      { @@ -42,7 +43,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"          } @@ -72,7 +72,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -104,7 +106,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -164,7 +165,12 @@                    },                    {                      "name": 
"MONGODB_DATABASE", -                    "value": "${MONGODB_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json index e5ba43669..d70d2263f 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "mongodb-persistent", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "MongoDB (Persistent)",        "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. 
You must have persistent volumes available in your cluster to use this template.", @@ -28,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}" +          "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MONGODB_USER}",          "database-password" : "${MONGODB_PASSWORD}", -        "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}" +        "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}", +        "database-name" : "${MONGODB_DATABASE}"        }      },      { @@ -42,7 +43,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"          } @@ -89,7 +89,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -121,7 +123,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -181,7 +182,12 @@                    },                    {                      "name": "MONGODB_DATABASE", -                    "value": "${MONGODB_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" 
: "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json index 969e62ac5..54785993c 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json @@ -19,7 +19,7 @@      "template": "mysql-ephemeral-template"    },    "objects": [ -   { +    {        "kind": "Secret",        "apiVersion": "v1",        "metadata": { @@ -27,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-root_password": "{.data['database-root-password']}" +          "template.openshift.io/expose-root_password": "{.data['database-root-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MYSQL_USER}",          "database-password" : "${MYSQL_PASSWORD}", -        "database-root-password" : "${MYSQL_ROOT_PASSWORD}" +        "database-root-password" : "${MYSQL_ROOT_PASSWORD}", +        "database-name" : "${MYSQL_DATABASE}"        }      },      { @@ -41,7 +43,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"          } @@ -71,7 +72,9 @@        "apiVersion": "v1",        "metadata": {          "name": 
"${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -103,7 +106,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -164,7 +166,12 @@                    },                    {                      "name": "MYSQL_DATABASE", -                    "value": "${MYSQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json index 4f39d41a5..2bd84b106 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json @@ -27,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-root_password": "{.data['database-root-password']}" +          "template.openshift.io/expose-root_password": "{.data['database-root-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MYSQL_USER}",          "database-password" : "${MYSQL_PASSWORD}", -        "database-root-password" : "${MYSQL_ROOT_PASSWORD}" +    
    "database-root-password" : "${MYSQL_ROOT_PASSWORD}", +        "database-name" : "${MYSQL_DATABASE}"        }      },      { @@ -78,7 +80,10 @@        "kind": "DeploymentConfig",        "apiVersion": "v1",        "metadata": { -        "name": "${DATABASE_SERVICE_NAME}" +        "name": "${DATABASE_SERVICE_NAME}", +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -168,7 +173,12 @@                    },                    {                      "name": "MYSQL_DATABASE", -                    "value": "${MYSQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json index c37102cb0..849c9d83f 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "postgresql-ephemeral", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "PostgreSQL (Ephemeral)",        "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. 
Only use this template for testing", @@ -27,12 +26,14 @@          "name": "${DATABASE_SERVICE_NAME}",          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}", -          "template.openshift.io/expose-password": "{.data['database-password']}" +          "template.openshift.io/expose-password": "{.data['database-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${POSTGRESQL_USER}", -        "database-password" : "${POSTGRESQL_PASSWORD}" +        "database-password" : "${POSTGRESQL_PASSWORD}", +        "database-name" : "${POSTGRESQL_DATABASE}"        }      },      { @@ -40,7 +41,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"          } @@ -70,7 +70,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -102,7 +104,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -153,7 +154,12 @@                    },                    {                      "name": "POSTGRESQL_DATABASE", -                    "value": "${POSTGRESQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                
  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json index 32dc93a95..b622baa01 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "postgresql-persistent", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "PostgreSQL (Persistent)",        "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", @@ -27,12 +26,14 @@          "name": "${DATABASE_SERVICE_NAME}",          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}", -          "template.openshift.io/expose-password": "{.data['database-password']}" +          "template.openshift.io/expose-password": "{.data['database-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${POSTGRESQL_USER}", -        "database-password" : "${POSTGRESQL_PASSWORD}" +        "database-password" : "${POSTGRESQL_PASSWORD}", +        "database-name" : "${POSTGRESQL_DATABASE}"        }      },      { @@ -40,7 +41,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": 
"postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"          } @@ -87,7 +87,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -119,7 +121,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -170,7 +171,12 @@                    },                    {                      "name": "POSTGRESQL_DATABASE", -                    "value": "${POSTGRESQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json index 6bb683e52..15bdd079b 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "redis-ephemeral", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "Redis (Ephemeral)",        "description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. 
Only use this template for testing", @@ -38,7 +37,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"          } @@ -68,7 +66,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -100,7 +100,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json index 9e8be2309..1e31b02e0 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "redis-persistent", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "Redis (Persistent)",        "description": "Redis in-memory data structure store, with persistent storage. 
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", @@ -38,7 +37,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"          } @@ -85,7 +83,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -117,7 +117,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/OWNERS b/roles/openshift_examples/files/examples/v3.6/image-streams/OWNERS new file mode 100644 index 000000000..6ddf77f12 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/OWNERS @@ -0,0 +1,14 @@ +reviewers: +  - bparees +  - sspeiche +  - mfojtik +  - liggitt +  - jcantrill +  - hhorak +  - csrwng +approvers: +  - bparees +  - mfojtik +  - liggitt +  - jcantrill +  - csrwng diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json index 857ffa980..ee753966f 100644 --- a/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json @@ -14,7 +14,7 @@              "metadata": {                  "name": 
"dotnet",                  "annotations": { -                    "openshift.io/display-name": ".NET Core" +                    "openshift.io/display-name": ".NET Core Builder Images"                  }              },              "spec": { @@ -23,17 +23,35 @@                          "name": "latest",                          "annotations": {                            "openshift.io/display-name": ".NET Core (Latest)", -                          "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.", +                          "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/build/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",                            "iconClass": "icon-dotnet",                            "tags": "builder,.net,dotnet,dotnetcore",                            "supports":"dotnet",                            "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",                            "sampleContextDir": "app", -                          "sampleRef": "dotnetcore-1.1" +                          "sampleRef": "dotnetcore-2.0"                          },                          "from": {                            "kind": "ImageStreamTag", -                          "name": "1.1" +                          "name": "2.0" +                        } +                    }, 
+                    { +                        "name": "2.0", +                        "annotations": { +                            "openshift.io/display-name": ".NET Core 2.0", +                            "description": "Build and run .NET Core 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/build/README.md.", +                            "iconClass": "icon-dotnet", +                            "tags": "builder,.net,dotnet,dotnetcore,rh-dotnet20", +                            "supports":"dotnet:2.0,dotnet", +                            "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git", +                            "sampleContextDir": "app", +                            "sampleRef": "dotnetcore-2.0", +                            "version": "2.0" +                        }, +                        "from": { +                          "kind": "DockerImage", +                          "name": "registry.access.redhat.com/dotnet/dotnet-20-rhel7:2.0"                          }                      },                      { @@ -74,6 +92,49 @@                      }                  ]              } +        }, +        { +            "kind": "ImageStream", +            "apiVersion": "v1", +            "metadata": { +                "name": "dotnet-runtime", +                "annotations": { +                    "openshift.io/display-name": ".NET Core Runtime Images" +                } +            }, +            "spec": { +                "tags": [ +                    { +                        "name": "latest", +                        "annotations": { +                          "openshift.io/display-name": ".NET Core Runtime (Latest)", +                          "description": "Run .NET Core applications on RHEL 7. 
For more information about using this image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/runtime/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core Runtime available on OpenShift, including major versions updates.", +                          "iconClass": "icon-dotnet", +                          "tags": "runtime,.net-runtime,dotnet-runtime,dotnetcore-runtime", +                          "supports":"dotnet-runtime" +                        }, +                        "from": { +                          "kind": "ImageStreamTag", +                          "name": "2.0" +                        } +                    }, +                    { +                        "name": "2.0", +                        "annotations": { +                            "openshift.io/display-name": ".NET Core 2.0 Runtime", +                            "description": "Run .NET Core applications on RHEL 7. 
For more information about using this image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/runtime/README.md.", +                            "iconClass": "icon-dotnet", +                            "tags": "runtime,.net-runtime,dotnet-runtime,dotnetcore-runtime", +                            "supports":"dotnet-runtime", +                            "version": "2.0" +                        }, +                        "from": { +                          "kind": "DockerImage", +                          "name": "registry.access.redhat.com/dotnet/dotnet-20-runtime-rhel7:2.0" +                        } +                    } +                ] +            }          }      ]  } diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/OWNERS b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/OWNERS new file mode 100644 index 000000000..a26e484d6 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/OWNERS @@ -0,0 +1,12 @@ +reviewers: +  - bparees +  - gabemontero +  - coreydaley +  - dinhxuanvu +  - sspeiche +  - mfojtik +  - jupierce +approvers: +  - bparees +  - mfojtik +  - jupierce diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json index 6d987ee33..289f809fa 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json @@ -89,7 +89,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        
"spec": { @@ -148,7 +149,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json index fb2ef206e..0562982b3 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json @@ -89,7 +89,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -148,7 +149,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json index 7ffb25e14..7a3875d09 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how 
to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -146,7 +147,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json index d787e376b..399ec72a8 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -146,7 +147,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json index a2070207b..e37f7a492 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json +++ 
b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -146,7 +147,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json index 0d33c6e0e..965c2ebfe 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -146,7 +147,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json 
b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json index af46579c8..f1fef3093 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json @@ -249,7 +249,7 @@              "displayName": ".NET builder",              "required": true,              "description": "The image stream tag which is used to build the code.", -            "value": "dotnet:1.0" +            "value": "dotnet:2.0"          },          {              "name": "NAMESPACE", @@ -269,7 +269,7 @@              "name": "SOURCE_REPOSITORY_REF",              "displayName": "Git Reference",              "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.", -            "value": "dotnetcore-1.0" +            "value": "dotnetcore-2.0"          },          {              "name": "CONTEXT_DIR", @@ -299,7 +299,7 @@          {              "name": "DOTNET_STARTUP_PROJECT",              "displayName": "Startup Project", -            "description": "Set this to the folder containing your startup project.", +            "description": "Set this to a project file (e.g. 
csproj) or a folder containing a single project file.",              "value": "app"          },          { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json index a2b59c2d3..c83132152 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json @@ -455,7 +455,7 @@              "displayName": ".NET builder",              "required": true,              "description": "The image stream tag which is used to build the code.", -            "value": "dotnet:1.1" +            "value": "dotnet:2.0"          },          {              "name": "NAMESPACE", @@ -475,7 +475,7 @@              "name": "SOURCE_REPOSITORY_REF",              "displayName": "Git Reference",              "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.", -            "value": "rel/1.1-example" +            "value": "rel/2.0-example"          },          {              "name": "CONTEXT_DIR", @@ -485,7 +485,7 @@          {              "name": "DOTNET_STARTUP_PROJECT",              "displayName": "Startup Project", -            "description": "Set this to the folder containing your startup project.", +            "description": "Set this to a project file (e.g. 
csproj) or a folder containing a single project file.",              "value": "samples/MusicStore"          },          { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-runtime-example.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-runtime-example.json new file mode 100644 index 000000000..e1dccf290 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-runtime-example.json @@ -0,0 +1,412 @@ +{ +    "kind": "Template", +    "apiVersion": "v1", +    "metadata": { +        "name": "dotnet-runtime-example", +        "annotations": { +            "openshift.io/display-name": ".NET Core Runtime Example", +            "description": "An example .NET Core Runtime example application.", +            "tags": "quickstart,dotnet,.net", +            "iconClass": "icon-dotnet", +            "template.openshift.io/provider-display-name": "Red Hat, Inc.", +            "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore", +            "template.openshift.io/support-url": "https://access.redhat.com" +        } +    }, +    "objects": [ +        { +            "kind": "Route", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}" +            }, +            "spec": { +                "host": "${APPLICATION_DOMAIN}", +                "to": { +                    "kind": "Service", +                    "name": "${NAME}" +                } +            } +        }, +        { +            "kind": "Service", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Exposes and load balances the application pods" +                } +            }, +            "spec": { +                "ports": [ +                    { +                        "name": "web", +                        
"port": 8080, +                        "targetPort": 8080 +                    } +                ], +                "selector": { +                    "name": "${NAME}" +                } +            } +        }, +        { +            "kind": "ImageStream", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Keeps track of changes in the application runtime image" +                } +            } +        }, +        { +            "kind": "ImageStream", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}-build", +                "annotations": { +                    "description": "Keeps track of changes in the application builder image" +                } +            } +        }, +        { +            "kind": "BuildConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}-build", +                "annotations": { +                    "description": "Defines how to build the application" +                } +            }, +            "spec": { +                "source": { +                    "type": "Git", +                    "git": { +                        "uri": "${SOURCE_REPOSITORY_URL}", +                        "ref": "${SOURCE_REPOSITORY_REF}" +                    }, +                    "contextDir": "${CONTEXT_DIR}" +                }, +                "strategy": { +                    "type": "Source", +                    "sourceStrategy": { +                        "from": { +                            "kind": "ImageStreamTag", +                            "namespace": "${NAMESPACE}", +                            "name": "${DOTNET_BUILD_IMAGE_STREAM_TAG}" +                        }, +                        "env": [ +                            { +                                "name": "DOTNET_STARTUP_PROJECT", +                         
       "value": "${DOTNET_STARTUP_PROJECT}" +                            }, +                            { +                                "name": "DOTNET_ASSEMBLY_NAME", +                                "value": "${DOTNET_ASSEMBLY_NAME}" +                            }, +                            { +                                "name": "DOTNET_NPM_TOOLS", +                                "value": "${DOTNET_NPM_TOOLS}" +                            }, +                            { +                                "name": "DOTNET_TEST_PROJECTS", +                                "value": "${DOTNET_TEST_PROJECTS}" +                            }, +                            { +                                "name": "DOTNET_CONFIGURATION", +                                "value": "${DOTNET_CONFIGURATION}" +                            }, +                            { +                                "name": "DOTNET_RESTORE_SOURCES", +                                "value": "${DOTNET_RESTORE_SOURCES}" +                            }, +                            { +                                "name": "DOTNET_PACK", +                                "value": "true" +                            } +                        ] +                    } +                }, +                "output": { +                    "to": { +                        "kind": "ImageStreamTag", +                        "name": "${NAME}-build:latest" +                    } +                }, +                "triggers": [ +                    { +                        "type": "ImageChange" +                    }, +                    { +                        "type": "ConfigChange" +                    }, +                    { +                        "type": "GitHub", +                        "github": { +                            "secret": "${GITHUB_WEBHOOK_SECRET}" +                        } +                    }, +                    { +                        "type": 
"Generic", +                        "generic": { +                            "secret": "${GENERIC_WEBHOOK_SECRET}" +                        } +                    } +                ] +            } +        }, +        { +            "kind": "BuildConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}-runtime", +                "annotations": { +                    "description": "Defines how to chain the runtime image from the source build image" +                } +            }, +            "spec": { +                "source": { +                    "dockerfile": "FROM ${DOTNET_RUNTIME_IMAGE_STREAM_TAG}\nADD app.tar.gz .", +                    "images": [ +                        { +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "${NAME}-build:latest" +                            }, +                            "paths": [ +                                { +                                    "sourcePath": "/opt/app-root/app.tar.gz", +                                    "destinationDir": "." 
+                                } +                            ] +                        } +                    ] +                }, +                "strategy": { +                    "type": "Docker", +                    "dockerStrategy": { +                        "from": { +                            "kind": "ImageStreamTag", +                            "namespace": "${NAMESPACE}", +                            "name": "${DOTNET_RUNTIME_IMAGE_STREAM_TAG}" +                        } +                    } +                }, +                "output": { +                    "to": { +                        "kind": "ImageStreamTag", +                        "name": "${NAME}:latest" +                    } +                }, +                "triggers": [ +                    { +                        "type": "ImageChange" +                    }, +                    { +                        "type": "ConfigChange" +                    }, +                    { +                        "type": "ImageChange", +                        "imageChange": { +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "${NAME}-build:latest" +                            } +                        } +                    } +                ] +            } +        }, +        { +            "kind": "DeploymentConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}-runtime", +                "annotations": { +                    "description": "Defines how to deploy the application server" +                } +            }, +            "spec": { +                "strategy": { +                    "type": "Rolling" +                }, +                "triggers": [ +                    { +                        "type": "ImageChange", +                        "imageChangeParams": { +                            "automatic": true, +                 
           "containerNames": [ +                                "dotnet-runtime-app" +                            ], +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "${NAME}:latest" +                            } +                        } +                    }, +                    { +                        "type": "ConfigChange" +                    } +                ], +                "replicas": 1, +                "selector": { +                    "name": "${NAME}" +                }, +                "template": { +                    "metadata": { +                        "name": "${NAME}", +                        "labels": { +                            "name": "${NAME}" +                        } +                    }, +                    "spec": { +                        "containers": [ +                            { +                                "name": "dotnet-runtime-app", +                                "image": " ", +                                "ports": [ +                                    { +                                        "containerPort": 8080 +                                    } +                                ], +                                "livenessProbe": { +                                    "httpGet": { +                                        "path": "/", +                                        "port": 8080, +                                        "scheme": "HTTP" +                                    }, +                                    "initialDelaySeconds": 40, +                                    "timeoutSeconds": 15 +                                }, +                                "readinessProbe": { +                                    "httpGet": { +                                        "path": "/", +                                        "port": 8080, +                                        "scheme": 
"HTTP" +                                    }, +                                    "initialDelaySeconds": 10, +                                    "timeoutSeconds": 30 +                                }, +                                "resources": { +                                    "limits": { +                                        "memory": "${MEMORY_LIMIT}" +                                    } +                                }, +                                "env": [] +                            } +                        ] +                    } +                } +            } +        } +    ], +    "parameters": [ +        { +            "name": "NAME", +            "displayName": "Name", +            "description": "The name assigned to all of the frontend objects defined in this template.", +            "required": true, +            "value": "dotnet-runtime-example" +        }, +        { +            "name": "MEMORY_LIMIT", +            "displayName": "Memory Limit", +            "description": "Maximum amount of memory the container can use.", +            "required": true, +            "value": "512Mi" +        }, +        { +            "name": "DOTNET_RUNTIME_IMAGE_STREAM_TAG", +            "displayName": ".NET Runtime Imagestream Tag", +            "description": "The image stream tag which is used to run the application.", +            "required": true, +            "value": "dotnet-runtime:2.0" +        }, +        { +            "name": "DOTNET_BUILD_IMAGE_STREAM_TAG", +            "displayName": ".NET builder", +            "required": true, +            "description": "The image stream tag which is used to build the application.", +            "value": "dotnet:2.0" +        }, +        { +            "name": "NAMESPACE", +            "displayName": "Namespace", +            "description": "The OpenShift Namespace where the ImageStream resides.", +            "required": true, +            "value": "openshift" +        }, +        
{ +            "name": "APPLICATION_DOMAIN", +            "displayName": "Application Hostname", +            "description": "The exposed hostname that will route to the .NET Core service, if left blank a value will be defaulted.", +            "value": "" +        }, +        { +            "name": "SOURCE_REPOSITORY_URL", +            "displayName": "Git Repository URL", +            "description": "The URL of the repository with your application source code.", +            "required": true, +            "value": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git" +        }, +        { +            "name": "SOURCE_REPOSITORY_REF", +            "displayName": "Git Reference", +            "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.", +            "value": "dotnetcore-2.0" +        }, +        { +            "name": "CONTEXT_DIR", +            "displayName": "Context Directory", +            "description": "Set this to use a subdirectory of the source code repository" +        }, +        { +            "name": "GITHUB_WEBHOOK_SECRET", +            "displayName": "GitHub Webhook Secret", +            "description": "A secret string used to configure the GitHub webhook.", +            "generate": "expression", +            "from": "[a-zA-Z0-9]{40}" +        }, +        { +            "name": "GENERIC_WEBHOOK_SECRET", +            "displayName": "Generic Webhook Secret", +            "description": "A secret string used to configure the Generic webhook.", +            "generate": "expression", +            "from": "[a-zA-Z0-9]{40}" +        }, +        { +            "name": "DOTNET_STARTUP_PROJECT", +            "displayName": "Startup Project", +            "description": "Set this to the folder containing your startup project.", +            "value": "app" +        }, +        { +            "name": "DOTNET_ASSEMBLY_NAME", +            "displayName": "Startup Assembly", +          
  "description": "Set this when the assembly name is overridden in the project file." +        }, +        { +            "name": "DOTNET_NPM_TOOLS", +            "displayName": "Npm Tools", +            "description": "Set this to a space separated list of npm tools needed to publish.", +            "value": "bower gulp" +        }, +        { +            "name": "DOTNET_TEST_PROJECTS", +            "displayName": "Test projects", +            "description": "Set this to a space separated list of test projects to run before publishing." +        }, +        { +            "name": "DOTNET_CONFIGURATION", +            "displayName": "Configuration", +            "description": "Set this to configuration (Release/Debug).", +            "value": "Release" +        }, +        { +            "name": "DOTNET_RESTORE_SOURCES", +            "displayName": "NuGet package sources", +            "description": "Set this to override the NuGet.config sources." +        } +    ] +} diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json index ac671cc06..6cf9d76eb 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json @@ -74,7 +74,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -130,7 +131,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }    
    },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json index ce96684a9..62f43bc0b 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "jenkins-ephemeral", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "Jenkins (Ephemeral)",        "description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", @@ -22,7 +21,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${JENKINS_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"          } @@ -43,7 +41,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${JENKINS_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -75,7 +75,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${JENKINS_SERVICE_NAME}"              } @@ -221,8 +220,7 @@           "annotations": {             "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",             "service.openshift.io/infrastructure": "true" -         }, -         "creationTimestamp": null +         }         },         "spec": {           "ports": [ diff --git 
a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json index 34b2b920b..e9068e455 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "jenkins-persistent", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "Jenkins (Persistent)",        "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", @@ -22,7 +21,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${JENKINS_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"          } @@ -60,7 +58,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${JENKINS_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -92,7 +92,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${JENKINS_SERVICE_NAME}"              } @@ -238,8 +237,7 @@           "annotations": {             "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",             "service.openshift.io/infrastructure": "true" -         }, -         "creationTimestamp": null +         }         },         "spec": {           "ports": [ diff --git 
a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json index a9c365361..df3704b9f 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -152,7 +153,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json index 53a6147d5..eb6ab33d9 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -152,7 +153,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application 
server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json index f07a43071..59e2e41ea 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json @@ -93,7 +93,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -152,7 +153,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json index a7992c988..b3d080a91 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json @@ -93,7 +93,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          
"template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -152,7 +153,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json index 536f7275e..6500ed0d3 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json @@ -27,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-root_password": "{.data['database-root-password']}" +          "template.openshift.io/expose-root_password": "{.data['database-root-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MYSQL_USER}",          "database-password" : "${MYSQL_PASSWORD}", -        "database-root-password" : "${MYSQL_ROOT_PASSWORD}" +        "database-root-password" : "${MYSQL_ROOT_PASSWORD}", +        "database-name" : "${MYSQL_DATABASE}"        }      },      { @@ -61,7 +63,10 @@        "kind": "DeploymentConfig",        "apiVersion": "v1",        "metadata": { -        "name": "${DATABASE_SERVICE_NAME}" +        "name": "${DATABASE_SERVICE_NAME}", +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          
"strategy": { @@ -151,7 +156,12 @@                    },                    {                      "name": "MYSQL_DATABASE", -                    "value": "${MYSQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json index 3b7fdccce..4378fa4a0 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json @@ -27,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-root_password": "{.data['database-root-password']}" +          "template.openshift.io/expose-root_password": "{.data['database-root-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MYSQL_USER}",          "database-password" : "${MYSQL_PASSWORD}", -        "database-root-password" : "${MYSQL_ROOT_PASSWORD}" +        "database-root-password" : "${MYSQL_ROOT_PASSWORD}", +        "database-name" : "${MYSQL_DATABASE}"        }      },      { @@ -78,7 +80,10 @@        "kind": "DeploymentConfig",        "apiVersion": "v1",        "metadata": { -        "name": "${DATABASE_SERVICE_NAME}" +        "name": "${DATABASE_SERVICE_NAME}", +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": 
"true" +        }        },        "spec": {          "strategy": { @@ -168,7 +173,12 @@                    },                    {                      "name": "MYSQL_DATABASE", -                    "value": "${MYSQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json index ee274194f..7271a2c69 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "mongodb-ephemeral", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "MongoDB (Ephemeral)",        "description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. 
Only use this template for testing", @@ -28,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}" +          "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MONGODB_USER}",          "database-password" : "${MONGODB_PASSWORD}", -        "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}" +        "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}", +        "database-name" : "${MONGODB_DATABASE}"        }      },      { @@ -42,7 +43,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"          } @@ -72,7 +72,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -104,7 +106,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -164,7 +165,12 @@                    },                    {                      "name": "MONGODB_DATABASE", -                    "value": "${MONGODB_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +               
         "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json index e5ba43669..d70d2263f 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "mongodb-persistent", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "MongoDB (Persistent)",        "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. 
You must have persistent volumes available in your cluster to use this template.", @@ -28,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}" +          "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MONGODB_USER}",          "database-password" : "${MONGODB_PASSWORD}", -        "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}" +        "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}", +        "database-name" : "${MONGODB_DATABASE}"        }      },      { @@ -42,7 +43,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"          } @@ -89,7 +89,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -121,7 +123,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -181,7 +182,12 @@                    },                    {                      "name": "MONGODB_DATABASE", -                    "value": "${MONGODB_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" 
: "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json index 969e62ac5..54785993c 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json @@ -19,7 +19,7 @@      "template": "mysql-ephemeral-template"    },    "objects": [ -   { +    {        "kind": "Secret",        "apiVersion": "v1",        "metadata": { @@ -27,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-root_password": "{.data['database-root-password']}" +          "template.openshift.io/expose-root_password": "{.data['database-root-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MYSQL_USER}",          "database-password" : "${MYSQL_PASSWORD}", -        "database-root-password" : "${MYSQL_ROOT_PASSWORD}" +        "database-root-password" : "${MYSQL_ROOT_PASSWORD}", +        "database-name" : "${MYSQL_DATABASE}"        }      },      { @@ -41,7 +43,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"          } @@ -71,7 +72,9 @@        "apiVersion": "v1",        "metadata": {          "name": 
"${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -103,7 +106,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -164,7 +166,12 @@                    },                    {                      "name": "MYSQL_DATABASE", -                    "value": "${MYSQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json index 4f39d41a5..2bd84b106 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json @@ -27,13 +27,15 @@          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}",            "template.openshift.io/expose-password": "{.data['database-password']}", -          "template.openshift.io/expose-root_password": "{.data['database-root-password']}" +          "template.openshift.io/expose-root_password": "{.data['database-root-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${MYSQL_USER}",          "database-password" : "${MYSQL_PASSWORD}", -        "database-root-password" : "${MYSQL_ROOT_PASSWORD}" +    
    "database-root-password" : "${MYSQL_ROOT_PASSWORD}", +        "database-name" : "${MYSQL_DATABASE}"        }      },      { @@ -78,7 +80,10 @@        "kind": "DeploymentConfig",        "apiVersion": "v1",        "metadata": { -        "name": "${DATABASE_SERVICE_NAME}" +        "name": "${DATABASE_SERVICE_NAME}", +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -168,7 +173,12 @@                    },                    {                      "name": "MYSQL_DATABASE", -                    "value": "${MYSQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json index c37102cb0..849c9d83f 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "postgresql-ephemeral", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "PostgreSQL (Ephemeral)",        "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. 
Only use this template for testing", @@ -27,12 +26,14 @@          "name": "${DATABASE_SERVICE_NAME}",          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}", -          "template.openshift.io/expose-password": "{.data['database-password']}" +          "template.openshift.io/expose-password": "{.data['database-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${POSTGRESQL_USER}", -        "database-password" : "${POSTGRESQL_PASSWORD}" +        "database-password" : "${POSTGRESQL_PASSWORD}", +        "database-name" : "${POSTGRESQL_DATABASE}"        }      },      { @@ -40,7 +41,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"          } @@ -70,7 +70,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -102,7 +104,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -153,7 +154,12 @@                    },                    {                      "name": "POSTGRESQL_DATABASE", -                    "value": "${POSTGRESQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                
  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json index 32dc93a95..b622baa01 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "postgresql-persistent", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "PostgreSQL (Persistent)",        "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", @@ -27,12 +26,14 @@          "name": "${DATABASE_SERVICE_NAME}",          "annotations": {            "template.openshift.io/expose-username": "{.data['database-user']}", -          "template.openshift.io/expose-password": "{.data['database-password']}" +          "template.openshift.io/expose-password": "{.data['database-password']}", +          "template.openshift.io/expose-database_name": "{.data['database-name']}"          }        },        "stringData" : {          "database-user" : "${POSTGRESQL_USER}", -        "database-password" : "${POSTGRESQL_PASSWORD}" +        "database-password" : "${POSTGRESQL_PASSWORD}", +        "database-name" : "${POSTGRESQL_DATABASE}"        }      },      { @@ -40,7 +41,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": 
"postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"          } @@ -87,7 +87,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -119,7 +121,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } @@ -170,7 +171,12 @@                    },                    {                      "name": "POSTGRESQL_DATABASE", -                    "value": "${POSTGRESQL_DATABASE}" +                    "valueFrom": { +                      "secretKeyRef" : { +                        "name" : "${DATABASE_SERVICE_NAME}", +                        "key" : "database-name" +                      } +                    }                    }                  ],                  "resources": { diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json index 6bb683e52..15bdd079b 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "redis-ephemeral", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "Redis (Ephemeral)",        "description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. 
Only use this template for testing", @@ -38,7 +37,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"          } @@ -68,7 +66,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -100,7 +100,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json index 9e8be2309..1e31b02e0 100644 --- a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "redis-persistent", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "Redis (Persistent)",        "description": "Redis in-memory data structure store, with persistent storage. 
For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", @@ -38,7 +37,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"          } @@ -85,7 +83,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${DATABASE_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -117,7 +117,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${DATABASE_SERVICE_NAME}"              } diff --git a/roles/openshift_examples/files/examples/v3.7/image-streams/OWNERS b/roles/openshift_examples/files/examples/v3.7/image-streams/OWNERS index 4ccb64c74..6ddf77f12 100644 --- a/roles/openshift_examples/files/examples/v3.7/image-streams/OWNERS +++ b/roles/openshift_examples/files/examples/v3.7/image-streams/OWNERS @@ -1,7 +1,6 @@  reviewers:    - bparees    - sspeiche -  - oatmealraisin    - mfojtik    - liggitt    - jcantrill diff --git a/roles/openshift_examples/files/examples/v3.7/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v3.7/image-streams/dotnet_imagestreams.json index 857ffa980..ee753966f 100644 --- a/roles/openshift_examples/files/examples/v3.7/image-streams/dotnet_imagestreams.json +++ b/roles/openshift_examples/files/examples/v3.7/image-streams/dotnet_imagestreams.json @@ -14,7 +14,7 @@              "metadata": {                  "name": "dotnet",                  "annotations": { -   
                 "openshift.io/display-name": ".NET Core" +                    "openshift.io/display-name": ".NET Core Builder Images"                  }              },              "spec": { @@ -23,17 +23,35 @@                          "name": "latest",                          "annotations": {                            "openshift.io/display-name": ".NET Core (Latest)", -                          "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.", +                          "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/build/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",                            "iconClass": "icon-dotnet",                            "tags": "builder,.net,dotnet,dotnetcore",                            "supports":"dotnet",                            "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",                            "sampleContextDir": "app", -                          "sampleRef": "dotnetcore-1.1" +                          "sampleRef": "dotnetcore-2.0"                          },                          "from": {                            "kind": "ImageStreamTag", -                          "name": "1.1" +                          "name": "2.0" +                        } +                    }, +                    { +                        
"name": "2.0", +                        "annotations": { +                            "openshift.io/display-name": ".NET Core 2.0", +                            "description": "Build and run .NET Core 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/build/README.md.", +                            "iconClass": "icon-dotnet", +                            "tags": "builder,.net,dotnet,dotnetcore,rh-dotnet20", +                            "supports":"dotnet:2.0,dotnet", +                            "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git", +                            "sampleContextDir": "app", +                            "sampleRef": "dotnetcore-2.0", +                            "version": "2.0" +                        }, +                        "from": { +                          "kind": "DockerImage", +                          "name": "registry.access.redhat.com/dotnet/dotnet-20-rhel7:2.0"                          }                      },                      { @@ -74,6 +92,49 @@                      }                  ]              } +        }, +        { +            "kind": "ImageStream", +            "apiVersion": "v1", +            "metadata": { +                "name": "dotnet-runtime", +                "annotations": { +                    "openshift.io/display-name": ".NET Core Runtime Images" +                } +            }, +            "spec": { +                "tags": [ +                    { +                        "name": "latest", +                        "annotations": { +                          "openshift.io/display-name": ".NET Core Runtime (Latest)", +                          "description": "Run .NET Core applications on RHEL 7. 
For more information about using this image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/runtime/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core Runtime available on OpenShift, including major versions updates.", +                          "iconClass": "icon-dotnet", +                          "tags": "runtime,.net-runtime,dotnet-runtime,dotnetcore-runtime", +                          "supports":"dotnet-runtime" +                        }, +                        "from": { +                          "kind": "ImageStreamTag", +                          "name": "2.0" +                        } +                    }, +                    { +                        "name": "2.0", +                        "annotations": { +                            "openshift.io/display-name": ".NET Core 2.0 Runtime", +                            "description": "Run .NET Core applications on RHEL 7. 
For more information about using this image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/runtime/README.md.", +                            "iconClass": "icon-dotnet", +                            "tags": "runtime,.net-runtime,dotnet-runtime,dotnetcore-runtime", +                            "supports":"dotnet-runtime", +                            "version": "2.0" +                        }, +                        "from": { +                          "kind": "DockerImage", +                          "name": "registry.access.redhat.com/dotnet/dotnet-20-runtime-rhel7:2.0" +                        } +                    } +                ] +            }          }      ]  } diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json index 6d987ee33..289f809fa 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json @@ -89,7 +89,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -148,7 +149,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json 
b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json index fb2ef206e..0562982b3 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json @@ -89,7 +89,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -148,7 +149,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json index 7ffb25e14..7a3875d09 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -146,7 +147,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          
"template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json index d787e376b..399ec72a8 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -146,7 +147,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json index a2070207b..e37f7a492 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -146,7 +147,8 @@        "metadata": {          
"name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json index 0d33c6e0e..965c2ebfe 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -146,7 +147,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-example.json index af46579c8..f1fef3093 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-example.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-example.json @@ -249,7 +249,7 @@              "displayName": ".NET builder",              "required": true,              "description": "The image stream tag which is used to build the code.", -            "value": 
"dotnet:1.0" +            "value": "dotnet:2.0"          },          {              "name": "NAMESPACE", @@ -269,7 +269,7 @@              "name": "SOURCE_REPOSITORY_REF",              "displayName": "Git Reference",              "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.", -            "value": "dotnetcore-1.0" +            "value": "dotnetcore-2.0"          },          {              "name": "CONTEXT_DIR", @@ -299,7 +299,7 @@          {              "name": "DOTNET_STARTUP_PROJECT",              "displayName": "Startup Project", -            "description": "Set this to the folder containing your startup project.", +            "description": "Set this to a project file (e.g. csproj) or a folder containing a single project file.",              "value": "app"          },          { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-pgsql-persistent.json index a2b59c2d3..c83132152 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-pgsql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-pgsql-persistent.json @@ -455,7 +455,7 @@              "displayName": ".NET builder",              "required": true,              "description": "The image stream tag which is used to build the code.", -            "value": "dotnet:1.1" +            "value": "dotnet:2.0"          },          {              "name": "NAMESPACE", @@ -475,7 +475,7 @@              "name": "SOURCE_REPOSITORY_REF",              "displayName": "Git Reference",              "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.", -            "value": "rel/1.1-example" +            "value": "rel/2.0-example"          },          {              "name": 
"CONTEXT_DIR", @@ -485,7 +485,7 @@          {              "name": "DOTNET_STARTUP_PROJECT",              "displayName": "Startup Project", -            "description": "Set this to the folder containing your startup project.", +            "description": "Set this to a project file (e.g. csproj) or a folder containing a single project file.",              "value": "samples/MusicStore"          },          { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-runtime-example.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-runtime-example.json new file mode 100644 index 000000000..e1dccf290 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-runtime-example.json @@ -0,0 +1,412 @@ +{ +    "kind": "Template", +    "apiVersion": "v1", +    "metadata": { +        "name": "dotnet-runtime-example", +        "annotations": { +            "openshift.io/display-name": ".NET Core Runtime Example", +            "description": "An example .NET Core Runtime example application.", +            "tags": "quickstart,dotnet,.net", +            "iconClass": "icon-dotnet", +            "template.openshift.io/provider-display-name": "Red Hat, Inc.", +            "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore", +            "template.openshift.io/support-url": "https://access.redhat.com" +        } +    }, +    "objects": [ +        { +            "kind": "Route", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}" +            }, +            "spec": { +                "host": "${APPLICATION_DOMAIN}", +                "to": { +                    "kind": "Service", +                    "name": "${NAME}" +                } +            } +        }, +        { +            "kind": "Service", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +        
        "annotations": { +                    "description": "Exposes and load balances the application pods" +                } +            }, +            "spec": { +                "ports": [ +                    { +                        "name": "web", +                        "port": 8080, +                        "targetPort": 8080 +                    } +                ], +                "selector": { +                    "name": "${NAME}" +                } +            } +        }, +        { +            "kind": "ImageStream", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Keeps track of changes in the application runtime image" +                } +            } +        }, +        { +            "kind": "ImageStream", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}-build", +                "annotations": { +                    "description": "Keeps track of changes in the application builder image" +                } +            } +        }, +        { +            "kind": "BuildConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}-build", +                "annotations": { +                    "description": "Defines how to build the application" +                } +            }, +            "spec": { +                "source": { +                    "type": "Git", +                    "git": { +                        "uri": "${SOURCE_REPOSITORY_URL}", +                        "ref": "${SOURCE_REPOSITORY_REF}" +                    }, +                    "contextDir": "${CONTEXT_DIR}" +                }, +                "strategy": { +                    "type": "Source", +                    "sourceStrategy": { +                        "from": { +                            "kind": "ImageStreamTag", +                            
"namespace": "${NAMESPACE}", +                            "name": "${DOTNET_BUILD_IMAGE_STREAM_TAG}" +                        }, +                        "env": [ +                            { +                                "name": "DOTNET_STARTUP_PROJECT", +                                "value": "${DOTNET_STARTUP_PROJECT}" +                            }, +                            { +                                "name": "DOTNET_ASSEMBLY_NAME", +                                "value": "${DOTNET_ASSEMBLY_NAME}" +                            }, +                            { +                                "name": "DOTNET_NPM_TOOLS", +                                "value": "${DOTNET_NPM_TOOLS}" +                            }, +                            { +                                "name": "DOTNET_TEST_PROJECTS", +                                "value": "${DOTNET_TEST_PROJECTS}" +                            }, +                            { +                                "name": "DOTNET_CONFIGURATION", +                                "value": "${DOTNET_CONFIGURATION}" +                            }, +                            { +                                "name": "DOTNET_RESTORE_SOURCES", +                                "value": "${DOTNET_RESTORE_SOURCES}" +                            }, +                            { +                                "name": "DOTNET_PACK", +                                "value": "true" +                            } +                        ] +                    } +                }, +                "output": { +                    "to": { +                        "kind": "ImageStreamTag", +                        "name": "${NAME}-build:latest" +                    } +                }, +                "triggers": [ +                    { +                        "type": "ImageChange" +                    }, +                    { +                        "type": "ConfigChange" +                   
 }, +                    { +                        "type": "GitHub", +                        "github": { +                            "secret": "${GITHUB_WEBHOOK_SECRET}" +                        } +                    }, +                    { +                        "type": "Generic", +                        "generic": { +                            "secret": "${GENERIC_WEBHOOK_SECRET}" +                        } +                    } +                ] +            } +        }, +        { +            "kind": "BuildConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}-runtime", +                "annotations": { +                    "description": "Defines how to chain the runtime image from the source build image" +                } +            }, +            "spec": { +                "source": { +                    "dockerfile": "FROM ${DOTNET_RUNTIME_IMAGE_STREAM_TAG}\nADD app.tar.gz .", +                    "images": [ +                        { +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "${NAME}-build:latest" +                            }, +                            "paths": [ +                                { +                                    "sourcePath": "/opt/app-root/app.tar.gz", +                                    "destinationDir": "." 
+                                } +                            ] +                        } +                    ] +                }, +                "strategy": { +                    "type": "Docker", +                    "dockerStrategy": { +                        "from": { +                            "kind": "ImageStreamTag", +                            "namespace": "${NAMESPACE}", +                            "name": "${DOTNET_RUNTIME_IMAGE_STREAM_TAG}" +                        } +                    } +                }, +                "output": { +                    "to": { +                        "kind": "ImageStreamTag", +                        "name": "${NAME}:latest" +                    } +                }, +                "triggers": [ +                    { +                        "type": "ImageChange" +                    }, +                    { +                        "type": "ConfigChange" +                    }, +                    { +                        "type": "ImageChange", +                        "imageChange": { +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "${NAME}-build:latest" +                            } +                        } +                    } +                ] +            } +        }, +        { +            "kind": "DeploymentConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}-runtime", +                "annotations": { +                    "description": "Defines how to deploy the application server" +                } +            }, +            "spec": { +                "strategy": { +                    "type": "Rolling" +                }, +                "triggers": [ +                    { +                        "type": "ImageChange", +                        "imageChangeParams": { +                            "automatic": true, +                 
           "containerNames": [ +                                "dotnet-runtime-app" +                            ], +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "${NAME}:latest" +                            } +                        } +                    }, +                    { +                        "type": "ConfigChange" +                    } +                ], +                "replicas": 1, +                "selector": { +                    "name": "${NAME}" +                }, +                "template": { +                    "metadata": { +                        "name": "${NAME}", +                        "labels": { +                            "name": "${NAME}" +                        } +                    }, +                    "spec": { +                        "containers": [ +                            { +                                "name": "dotnet-runtime-app", +                                "image": " ", +                                "ports": [ +                                    { +                                        "containerPort": 8080 +                                    } +                                ], +                                "livenessProbe": { +                                    "httpGet": { +                                        "path": "/", +                                        "port": 8080, +                                        "scheme": "HTTP" +                                    }, +                                    "initialDelaySeconds": 40, +                                    "timeoutSeconds": 15 +                                }, +                                "readinessProbe": { +                                    "httpGet": { +                                        "path": "/", +                                        "port": 8080, +                                        "scheme": 
"HTTP" +                                    }, +                                    "initialDelaySeconds": 10, +                                    "timeoutSeconds": 30 +                                }, +                                "resources": { +                                    "limits": { +                                        "memory": "${MEMORY_LIMIT}" +                                    } +                                }, +                                "env": [] +                            } +                        ] +                    } +                } +            } +        } +    ], +    "parameters": [ +        { +            "name": "NAME", +            "displayName": "Name", +            "description": "The name assigned to all of the frontend objects defined in this template.", +            "required": true, +            "value": "dotnet-runtime-example" +        }, +        { +            "name": "MEMORY_LIMIT", +            "displayName": "Memory Limit", +            "description": "Maximum amount of memory the container can use.", +            "required": true, +            "value": "512Mi" +        }, +        { +            "name": "DOTNET_RUNTIME_IMAGE_STREAM_TAG", +            "displayName": ".NET Runtime Imagestream Tag", +            "description": "The image stream tag which is used to run the application.", +            "required": true, +            "value": "dotnet-runtime:2.0" +        }, +        { +            "name": "DOTNET_BUILD_IMAGE_STREAM_TAG", +            "displayName": ".NET builder", +            "required": true, +            "description": "The image stream tag which is used to build the application.", +            "value": "dotnet:2.0" +        }, +        { +            "name": "NAMESPACE", +            "displayName": "Namespace", +            "description": "The OpenShift Namespace where the ImageStream resides.", +            "required": true, +            "value": "openshift" +        }, +        
{ +            "name": "APPLICATION_DOMAIN", +            "displayName": "Application Hostname", +            "description": "The exposed hostname that will route to the .NET Core service, if left blank a value will be defaulted.", +            "value": "" +        }, +        { +            "name": "SOURCE_REPOSITORY_URL", +            "displayName": "Git Repository URL", +            "description": "The URL of the repository with your application source code.", +            "required": true, +            "value": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git" +        }, +        { +            "name": "SOURCE_REPOSITORY_REF", +            "displayName": "Git Reference", +            "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.", +            "value": "dotnetcore-2.0" +        }, +        { +            "name": "CONTEXT_DIR", +            "displayName": "Context Directory", +            "description": "Set this to use a subdirectory of the source code repository" +        }, +        { +            "name": "GITHUB_WEBHOOK_SECRET", +            "displayName": "GitHub Webhook Secret", +            "description": "A secret string used to configure the GitHub webhook.", +            "generate": "expression", +            "from": "[a-zA-Z0-9]{40}" +        }, +        { +            "name": "GENERIC_WEBHOOK_SECRET", +            "displayName": "Generic Webhook Secret", +            "description": "A secret string used to configure the Generic webhook.", +            "generate": "expression", +            "from": "[a-zA-Z0-9]{40}" +        }, +        { +            "name": "DOTNET_STARTUP_PROJECT", +            "displayName": "Startup Project", +            "description": "Set this to the folder containing your startup project.", +            "value": "app" +        }, +        { +            "name": "DOTNET_ASSEMBLY_NAME", +            "displayName": "Startup Assembly", +          
  "description": "Set this when the assembly name is overridden in the project file." +        }, +        { +            "name": "DOTNET_NPM_TOOLS", +            "displayName": "Npm Tools", +            "description": "Set this to a space separated list of npm tools needed to publish.", +            "value": "bower gulp" +        }, +        { +            "name": "DOTNET_TEST_PROJECTS", +            "displayName": "Test projects", +            "description": "Set this to a space separated list of test projects to run before publishing." +        }, +        { +            "name": "DOTNET_CONFIGURATION", +            "displayName": "Configuration", +            "description": "Set this to configuration (Release/Debug).", +            "value": "Release" +        }, +        { +            "name": "DOTNET_RESTORE_SOURCES", +            "displayName": "NuGet package sources", +            "description": "Set this to override the NuGet.config sources." +        } +    ] +} diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json index ac671cc06..6cf9d76eb 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json @@ -74,7 +74,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -130,7 +131,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }    
    },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json index ce96684a9..62f43bc0b 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "jenkins-ephemeral", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "Jenkins (Ephemeral)",        "description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", @@ -22,7 +21,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${JENKINS_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"          } @@ -43,7 +41,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${JENKINS_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -75,7 +75,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${JENKINS_SERVICE_NAME}"              } @@ -221,8 +220,7 @@           "annotations": {             "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",             "service.openshift.io/infrastructure": "true" -         }, -         "creationTimestamp": null +         }         },         "spec": {           "ports": [ diff --git 
a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json index 34b2b920b..e9068e455 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json @@ -3,7 +3,6 @@    "apiVersion": "v1",    "metadata": {      "name": "jenkins-persistent", -    "creationTimestamp": null,      "annotations": {        "openshift.io/display-name": "Jenkins (Persistent)",        "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", @@ -22,7 +21,6 @@        "apiVersion": "v1",        "metadata": {          "name": "${JENKINS_SERVICE_NAME}", -        "creationTimestamp": null,          "annotations": {            "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"          } @@ -60,7 +58,9 @@        "apiVersion": "v1",        "metadata": {          "name": "${JENKINS_SERVICE_NAME}", -        "creationTimestamp": null +        "annotations": { +          "template.alpha.openshift.io/wait-for-ready": "true" +        }        },        "spec": {          "strategy": { @@ -92,7 +92,6 @@          },          "template": {            "metadata": { -            "creationTimestamp": null,              "labels": {                "name": "${JENKINS_SERVICE_NAME}"              } @@ -238,8 +237,7 @@           "annotations": {             "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",             "service.openshift.io/infrastructure": "true" -         }, -         "creationTimestamp": null +         }         },         "spec": {           "ports": [ diff --git 
a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json index a9c365361..df3704b9f 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -152,7 +153,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json index 53a6147d5..eb6ab33d9 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json @@ -87,7 +87,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -152,7 +153,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application 
server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json index f07a43071..59e2e41ea 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json @@ -93,7 +93,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -152,7 +153,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json index a7992c988..b3d080a91 100644 --- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json @@ -93,7 +93,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to build the application" +          "description": "Defines how to build the application", +          
"template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { @@ -152,7 +153,8 @@        "metadata": {          "name": "${NAME}",          "annotations": { -          "description": "Defines how to deploy the application server" +          "description": "Defines how to deploy the application server", +          "template.alpha.openshift.io/wait-for-ready": "true"          }        },        "spec": { diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 844d77255..251d1dfb4 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -193,7 +193,9 @@ def hostname_valid(hostname):      """      if (not hostname or              hostname.startswith('localhost') or -            hostname.endswith('localdomain')): +            hostname.endswith('localdomain') or +            # OpenShift will not allow a node with more than 63 chars in name. +            len(hostname) > 63):          return False      return True diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py index 05e53333d..8d35db6b5 100644 --- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py +++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py @@ -1,76 +1,74 @@  """  Ansible action plugin to execute health checks in OpenShift clusters.  """ -# pylint: disable=wrong-import-position,missing-docstring,invalid-name  import sys  import os +import traceback  from collections import defaultdict +from ansible.plugins.action import ActionBase +from ansible.module_utils.six import string_types +  try:      from __main__ import display  except ImportError: +    # pylint: disable=ungrouped-imports; this is the standard way how to import +    # the default display object in Ansible action plugins.      
from ansible.utils.display import Display      display = Display() -from ansible.plugins.action import ActionBase -from ansible.module_utils.six import string_types -  # Augment sys.path so that we can import checks from a directory relative to  # this callback plugin.  sys.path.insert(1, os.path.dirname(os.path.dirname(__file__))) +# pylint: disable=wrong-import-position; the import statement must come after +# the manipulation of sys.path.  from openshift_checks import OpenShiftCheck, OpenShiftCheckException, load_checks  # noqa: E402  class ActionModule(ActionBase): +    """Action plugin to execute health checks."""      def run(self, tmp=None, task_vars=None):          result = super(ActionModule, self).run(tmp, task_vars)          task_vars = task_vars or {} -        # vars are not supportably available in the callback plugin, -        # so record any it will need in the result. +        # callback plugins cannot read Ansible vars, but we would like +        # zz_failure_summary to have access to certain values. We do so by +        # storing the information we need in the result.          result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context') -        if "openshift" not in task_vars: -            result["failed"] = True -            result["msg"] = "'openshift' is undefined, did 'openshift_facts' run?" 
-            return result -          try:              known_checks = self.load_known_checks(tmp, task_vars)              args = self._task.args              requested_checks = normalize(args.get('checks', [])) + +            if not requested_checks: +                result['failed'] = True +                result['msg'] = list_known_checks(known_checks) +                return result +              resolved_checks = resolve_checks(requested_checks, known_checks.values()) -        except OpenShiftCheckException as e: +        except OpenShiftCheckException as exc:              result["failed"] = True -            result["msg"] = str(e) +            result["msg"] = str(exc) +            return result + +        if "openshift" not in task_vars: +            result["failed"] = True +            result["msg"] = "'openshift' is undefined, did 'openshift_facts' run?"              return result          result["checks"] = check_results = {}          user_disabled_checks = normalize(task_vars.get('openshift_disable_check', [])) -        for check_name in resolved_checks: -            display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"])) -            check = known_checks[check_name] - -            if not check.is_active(): -                r = dict(skipped=True, skipped_reason="Not active for this host") -            elif check_name in user_disabled_checks: -                r = dict(skipped=True, skipped_reason="Disabled by user request") -            else: -                try: -                    r = check.run() -                except OpenShiftCheckException as e: -                    r = dict( -                        failed=True, -                        msg=str(e), -                    ) - +        for name in resolved_checks: +            display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"])) +            check = known_checks[name] +            check_results[name] = run_check(name, check, user_disabled_checks)            
  if check.changed: -                r["changed"] = True -            check_results[check_name] = r +                check_results[name]["changed"] = True          result["changed"] = any(r.get("changed") for r in check_results.values())          if any(r.get("failed") for r in check_results.values()): @@ -80,22 +78,55 @@ class ActionModule(ActionBase):          return result      def load_known_checks(self, tmp, task_vars): +        """Find all existing checks and return a mapping of names to instances."""          load_checks()          known_checks = {}          for cls in OpenShiftCheck.subclasses(): -            check_name = cls.name -            if check_name in known_checks: -                other_cls = known_checks[check_name].__class__ +            name = cls.name +            if name in known_checks: +                other_cls = known_checks[name].__class__                  raise OpenShiftCheckException( -                    "non-unique check name '{}' in: '{}.{}' and '{}.{}'".format( -                        check_name, -                        cls.__module__, cls.__name__, -                        other_cls.__module__, other_cls.__name__)) -            known_checks[check_name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars) +                    "duplicate check name '{}' in: '{}' and '{}'" +                    "".format(name, full_class_name(cls), full_class_name(other_cls)) +                ) +            known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)          return known_checks +def list_known_checks(known_checks): +    """Return text listing the existing checks and tags.""" +    # TODO: we could include a description of each check by taking it from a +    # check class attribute (e.g., __doc__) when building the message below. +    msg = ( +        'This playbook is meant to run health checks, but no checks were ' +        'requested. 
Set the `openshift_checks` variable to a comma-separated ' +        'list of check names or a YAML list. Available checks:\n  {}' +    ).format('\n  '.join(sorted(known_checks))) + +    tags = describe_tags(known_checks.values()) + +    msg += ( +        '\n\nTags can be used as a shortcut to select multiple ' +        'checks. Available tags and the checks they select:\n  {}' +    ).format('\n  '.join(tags)) + +    return msg + + +def describe_tags(check_classes): +    """Return a sorted list of strings describing tags and the checks they include.""" +    tag_checks = defaultdict(list) +    for cls in check_classes: +        for tag in cls.tags: +            tag_checks[tag].append(cls.name) +    tags = [ +        '@{} = {}'.format(tag, ','.join(sorted(checks))) +        for tag, checks in tag_checks.items() +    ] +    return sorted(tags) + +  def resolve_checks(names, all_checks):      """Returns a set of resolved check names. @@ -123,6 +154,12 @@ def resolve_checks(names, all_checks):          if unknown_tag_names:              msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names))))          msg.append('Make sure there is no typo in the playbook and no files are missing.') +        # TODO: implement a "Did you mean ...?" when the input is similar to a +        # valid check or tag. 
+        msg.append('Known checks:') +        msg.append('  {}'.format('\n  '.join(sorted(known_check_names)))) +        msg.append('Known tags:') +        msg.append('  {}'.format('\n  '.join(describe_tags(all_checks))))          raise OpenShiftCheckException('\n'.join(msg))      tag_to_checks = defaultdict(set) @@ -146,3 +183,32 @@ def normalize(checks):      if isinstance(checks, string_types):          checks = checks.split(',')      return [name.strip() for name in checks if name.strip()] + + +def run_check(name, check, user_disabled_checks): +    """Run a single check if enabled and return a result dict.""" +    if name in user_disabled_checks: +        return dict(skipped=True, skipped_reason="Disabled by user request") + +    # pylint: disable=broad-except; capturing exceptions broadly is intentional, +    # to isolate arbitrary failures in one check from others. +    try: +        is_active = check.is_active() +    except Exception as exc: +        reason = "Could not determine if check should be run, exception: {}".format(exc) +        return dict(skipped=True, skipped_reason=reason, exception=traceback.format_exc()) + +    if not is_active: +        return dict(skipped=True, skipped_reason="Not active for this host") + +    try: +        return check.run() +    except OpenShiftCheckException as exc: +        return dict(failed=True, msg=str(exc)) +    except Exception as exc: +        return dict(failed=True, msg=str(exc), exception=traceback.format_exc()) + + +def full_class_name(cls): +    """Return the name of a class prefixed with its module name.""" +    return '{}.{}'.format(cls.__module__, cls.__name__) diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py index d10200719..349655966 100644 --- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py +++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py @@ -1,161 
+1,223 @@ -""" -Ansible callback plugin to give a nicely formatted summary of failures. -""" +"""Ansible callback plugin to print a nicely formatted summary of failures. -# Reason: In several locations below we disable pylint protected-access -#         for Ansible objects that do not give us any public way -#         to access the full details we need to report check failures. -# Status: disabled permanently or until Ansible object has a public API. -# This does leave the code more likely to be broken by future Ansible changes. +The file / module name is prefixed with `zz_` to make this plugin be loaded last +by Ansible, thus making its output the last thing that users see. +""" -from pprint import pformat +from collections import defaultdict +import traceback  from ansible.plugins.callback import CallbackBase  from ansible import constants as C  from ansible.utils.color import stringc +FAILED_NO_MSG = u'Failed without returning a message.' + +  class CallbackModule(CallbackBase): -    """ -    This callback plugin stores task results and summarizes failures. -    The file name is prefixed with `zz_` to make this plugin be loaded last by -    Ansible, thus making its output the last thing that users see. 
-    """ +    """This callback plugin stores task results and summarizes failures."""      CALLBACK_VERSION = 2.0      CALLBACK_TYPE = 'aggregate'      CALLBACK_NAME = 'failure_summary'      CALLBACK_NEEDS_WHITELIST = False -    _playbook_file = None      def __init__(self):          super(CallbackModule, self).__init__()          self.__failures = [] +        self.__playbook_file = ''      def v2_playbook_on_start(self, playbook):          super(CallbackModule, self).v2_playbook_on_start(playbook) -        # re: playbook attrs see top comment  # pylint: disable=protected-access -        self._playbook_file = playbook._file_name +        # pylint: disable=protected-access; Ansible gives us no public API to +        # get the file name of the current playbook from a callback plugin. +        self.__playbook_file = playbook._file_name      def v2_runner_on_failed(self, result, ignore_errors=False):          super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)          if not ignore_errors: -            self.__failures.append(dict(result=result, ignore_errors=ignore_errors)) +            self.__failures.append(result)      def v2_playbook_on_stats(self, stats):          super(CallbackModule, self).v2_playbook_on_stats(stats) -        if self.__failures: -            self._print_failure_details(self.__failures) - -    def _print_failure_details(self, failures): -        """Print a summary of failed tasks or checks.""" -        self._display.display(u'\nFailure summary:\n') - -        width = len(str(len(failures))) -        initial_indent_format = u'  {{:>{width}}}. 
'.format(width=width) -        initial_indent_len = len(initial_indent_format.format(0)) -        subsequent_indent = u' ' * initial_indent_len -        subsequent_extra_indent = u' ' * (initial_indent_len + 10) - -        for i, failure in enumerate(failures, 1): -            entries = _format_failure(failure) -            self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), entries[0])) -            for entry in entries[1:]: -                entry = entry.replace(u'\n', u'\n' + subsequent_extra_indent) -                indented = u'{}{}'.format(subsequent_indent, entry) -                self._display.display(indented) - -        failed_checks = set() -        playbook_context = None -        # re: result attrs see top comment  # pylint: disable=protected-access -        for failure in failures: -            # Get context from check task result since callback plugins cannot access task vars. -            # NOTE: thus context is not known unless checks run. Failures prior to checks running -            # don't have playbook_context in the results. But we only use it now when checks fail. -            playbook_context = playbook_context or failure['result']._result.get('playbook_context') -            failed_checks.update( -                name -                for name, result in failure['result']._result.get('checks', {}).items() -                if result.get('failed') -            ) -        if failed_checks: -            self._print_check_failure_summary(failed_checks, playbook_context) - -    def _print_check_failure_summary(self, failed_checks, context): -        checks = ','.join(sorted(failed_checks)) -        # The purpose of specifying context is to vary the output depending on what the user was -        # expecting to happen (based on which playbook they ran). 
The only use currently is to -        # vary the message depending on whether the user was deliberately running checks or was -        # trying to install/upgrade and checks are just included. Other use cases may arise. -        summary = (  # default to explaining what checks are in the first place -            '\n' -            'The execution of "{playbook}"\n' -            'includes checks designed to fail early if the requirements\n' -            'of the playbook are not met. One or more of these checks\n' -            'failed. To disregard these results, you may choose to\n' -            'disable failing checks by setting an Ansible variable:\n\n' -            '   openshift_disable_check={checks}\n\n' -            'Failing check names are shown in the failure details above.\n' -            'Some checks may be configurable by variables if your requirements\n' -            'are different from the defaults; consult check documentation.\n' -            'Variables can be set in the inventory or passed on the\n' -            'command line using the -e flag to ansible-playbook.\n\n' -        ).format(playbook=self._playbook_file, checks=checks) -        if context in ['pre-install', 'health']: -            summary = (  # user was expecting to run checks, less explanation needed -                '\n' -                'You may choose to configure or disable failing checks by\n' -                'setting Ansible variables. 
To disable those above:\n\n' -                '    openshift_disable_check={checks}\n\n' -                'Consult check documentation for configurable variables.\n' -                'Variables can be set in the inventory or passed on the\n' -                'command line using the -e flag to ansible-playbook.\n\n' -            ).format(checks=checks) -        self._display.display(summary) - - -# re: result attrs see top comment  # pylint: disable=protected-access -def _format_failure(failure): +        # pylint: disable=broad-except; capturing exceptions broadly is +        # intentional, to isolate arbitrary failures in this callback plugin. +        try: +            if self.__failures: +                self._display.display(failure_summary(self.__failures, self.__playbook_file)) +        except Exception: +            msg = stringc( +                u'An error happened while generating a summary of failures:\n' +                u'{}'.format(traceback.format_exc()), C.COLOR_WARN) +            self._display.v(msg) + + +def failure_summary(failures, playbook): +    """Return a summary of failed tasks, including details on health checks.""" +    if not failures: +        return u'' + +    # NOTE: because we don't have access to task_vars from callback plugins, we +    # store the playbook context in the task result when the +    # openshift_health_check action plugin is used, and we use this context to +    # customize the error message. +    # pylint: disable=protected-access; Ansible gives us no sufficient public +    # API on TaskResult objects. +    context = next(( +        context for context in +        (failure._result.get('playbook_context') for failure in failures) +        if context +    ), None) + +    failures = [failure_to_dict(failure) for failure in failures] +    failures = deduplicate_failures(failures) + +    summary = [u'', u'', u'Failure summary:', u''] + +    width = len(str(len(failures))) +    initial_indent_format = u'  {{:>{width}}}. 
'.format(width=width) +    initial_indent_len = len(initial_indent_format.format(0)) +    subsequent_indent = u' ' * initial_indent_len +    subsequent_extra_indent = u' ' * (initial_indent_len + 10) + +    for i, failure in enumerate(failures, 1): +        entries = format_failure(failure) +        summary.append(u'\n{}{}'.format(initial_indent_format.format(i), entries[0])) +        for entry in entries[1:]: +            entry = entry.replace(u'\n', u'\n' + subsequent_extra_indent) +            indented = u'{}{}'.format(subsequent_indent, entry) +            summary.append(indented) + +    failed_checks = set() +    for failure in failures: +        failed_checks.update(name for name, message in failure['checks']) +    if failed_checks: +        summary.append(check_failure_footer(failed_checks, context, playbook)) + +    return u'\n'.join(summary) + + +def failure_to_dict(failed_task_result): +    """Extract information out of a failed TaskResult into a dict. + +    The intent is to transform a TaskResult object into something easier to +    manipulate. TaskResult is ansible.executor.task_result.TaskResult. +    """ +    # pylint: disable=protected-access; Ansible gives us no sufficient public +    # API on TaskResult objects. +    _result = failed_task_result._result +    return { +        'host': failed_task_result._host.get_name(), +        'play': play_name(failed_task_result._task), +        'task': failed_task_result.task_name, +        'msg': _result.get('msg', FAILED_NO_MSG), +        'checks': tuple( +            (name, result.get('msg', FAILED_NO_MSG)) +            for name, result in sorted(_result.get('checks', {}).items()) +            if result.get('failed') +        ), +    } + + +def play_name(obj): +    """Given a task or block, return the name of its parent play. + +    This is loosely inspired by ansible.playbook.base.Base.dump_me. 
+    """ +    # pylint: disable=protected-access; Ansible gives us no sufficient public +    # API to implement this. +    if not obj: +        return '' +    if hasattr(obj, '_play'): +        return obj._play.get_name() +    return play_name(getattr(obj, '_parent')) + + +def deduplicate_failures(failures): +    """Group together similar failures from different hosts. + +    Returns a new list of failures such that identical failures from different +    hosts are grouped together in a single entry. The relative order of failures +    is preserved. +    """ +    groups = defaultdict(list) +    for failure in failures: +        group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host')) +        groups[group_key].append(failure) +    result = [] +    for failure in failures: +        group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host')) +        if group_key not in groups: +            continue +        failure['host'] = tuple(sorted(g_failure['host'] for g_failure in groups.pop(group_key))) +        result.append(failure) +    return result + + +def format_failure(failure):      """Return a list of pretty-formatted text entries describing a failure, including      relevant information about it. 
Expect that the list of text entries will be joined      by a newline separator when output to the user.""" -    result = failure['result'] -    host = result._host.get_name() -    play = _get_play(result._task) -    if play: -        play = play.get_name() -    task = result._task.get_name() -    msg = result._result.get('msg', u'???') +    host = u', '.join(failure['host']) +    play = failure['play'] +    task = failure['task'] +    msg = failure['msg'] +    checks = failure['checks']      fields = ( -        (u'Host', host), +        (u'Hosts', host),          (u'Play', play),          (u'Task', task),          (u'Message', stringc(msg, C.COLOR_ERROR)),      ) -    if 'checks' in result._result: -        fields += ((u'Details', _format_failed_checks(result._result['checks'])),) +    if checks: +        fields += ((u'Details', format_failed_checks(checks)),)      row_format = '{:10}{}'      return [row_format.format(header + u':', body) for header, body in fields] -def _format_failed_checks(checks): +def format_failed_checks(checks):      """Return pretty-formatted text describing checks that failed.""" -    failed_check_msgs = [] -    for check, body in checks.items(): -        if body.get('failed', False):   # only show the failed checks -            msg = body.get('msg', u"Failed without returning a message") -            failed_check_msgs.append('check "%s":\n%s' % (check, msg)) -    if failed_check_msgs: -        return stringc("\n\n".join(failed_check_msgs), C.COLOR_ERROR) -    else:    # something failed but no checks will admit to it, so dump everything -        return stringc(pformat(checks), C.COLOR_ERROR) - - -# This is inspired by ansible.playbook.base.Base.dump_me. 
-# re: play/task/block attrs see top comment  # pylint: disable=protected-access -def _get_play(obj): -    """Given a task or block, recursively try to find its parent play.""" -    if hasattr(obj, '_play'): -        return obj._play -    if getattr(obj, '_parent'): -        return _get_play(obj._parent) +    messages = [] +    for name, message in checks: +        messages.append(u'check "{}":\n{}'.format(name, message)) +    return stringc(u'\n\n'.join(messages), C.COLOR_ERROR) + + +def check_failure_footer(failed_checks, context, playbook): +    """Return a textual explanation about checks depending on context. + +    The purpose of specifying context is to vary the output depending on what +    the user was expecting to happen (based on which playbook they ran). The +    only use currently is to vary the message depending on whether the user was +    deliberately running checks or was trying to install/upgrade and checks are +    just included. Other use cases may arise. +    """ +    checks = ','.join(sorted(failed_checks)) +    summary = [u''] +    if context in ['pre-install', 'health', 'adhoc']: +        # User was expecting to run checks, less explanation needed. +        summary.extend([ +            u'You may configure or disable checks by setting Ansible ' +            u'variables. To disable those above, set:', +            u'    openshift_disable_check={checks}'.format(checks=checks), +            u'Consult check documentation for configurable variables.', +        ]) +    else: +        # User may not be familiar with the checks, explain what checks are in +        # the first place. +        summary.extend([ +            u'The execution of "{playbook}" includes checks designed to fail ' +            u'early if the requirements of the playbook are not met. One or ' +            u'more of these checks failed. 
To disregard these results,' +            u'explicitly disable checks by setting an Ansible variable:'.format(playbook=playbook), +            u'   openshift_disable_check={checks}'.format(checks=checks), +            u'Failing check names are shown in the failure details above. ' +            u'Some checks may be configurable by variables if your requirements ' +            u'are different from the defaults; consult check documentation.', +        ]) +    summary.append( +        u'Variables can be set in the inventory or passed on the command line ' +        u'using the -e flag to ansible-playbook.' +    ) +    return u'\n'.join(summary) diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py index f5161d6f5..c109ebd24 100644 --- a/roles/openshift_health_checker/test/action_plugin_test.py +++ b/roles/openshift_health_checker/test/action_plugin_test.py @@ -80,7 +80,8 @@ def skipped(result):      None,      {},  ]) -def test_action_plugin_missing_openshift_facts(plugin, task_vars): +def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch): +    monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])      result = plugin.run(tmp=None, task_vars=task_vars)      assert failed(result, msg_has=['openshift_facts']) @@ -94,7 +95,7 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,      result = plugin.run(tmp=None, task_vars=task_vars) -    assert failed(result, msg_has=['unique', 'duplicate_name', 'FakeCheck']) +    assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])  def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch): @@ -217,24 +218,21 @@ def test_resolve_checks_ok(names, all_checks, expected):      assert resolve_checks(names, all_checks) == expected -@pytest.mark.parametrize('names,all_checks,words_in_exception,words_not_in_exception', [ 
+@pytest.mark.parametrize('names,all_checks,words_in_exception', [      (          ['testA', 'testB'],          [],          ['check', 'name', 'testA', 'testB'], -        ['tag', 'group', '@'],      ),      (          ['@group'],          [],          ['tag', 'name', 'group'], -        ['check', '@'],      ),      (          ['testA', 'testB', '@group'],          [],          ['check', 'name', 'testA', 'testB', 'tag', 'group'], -        ['@'],      ),      (          ['testA', 'testB', '@group'], @@ -244,13 +242,10 @@ def test_resolve_checks_ok(names, all_checks, expected):              fake_check('from_group_2', ['preflight', 'group']),          ],          ['check', 'name', 'testA', 'testB'], -        ['tag', 'group', '@'],      ),  ]) -def test_resolve_checks_failure(names, all_checks, words_in_exception, words_not_in_exception): +def test_resolve_checks_failure(names, all_checks, words_in_exception):      with pytest.raises(Exception) as excinfo:          resolve_checks(names, all_checks)      for word in words_in_exception:          assert word in str(excinfo.value) -    for word in words_not_in_exception: -        assert word not in str(excinfo.value) diff --git a/roles/openshift_health_checker/test/conftest.py b/roles/openshift_health_checker/test/conftest.py index 3cbd65507..244a1f0fa 100644 --- a/roles/openshift_health_checker/test/conftest.py +++ b/roles/openshift_health_checker/test/conftest.py @@ -7,5 +7,6 @@ openshift_health_checker_path = os.path.dirname(os.path.dirname(__file__))  sys.path[1:1] = [      openshift_health_checker_path,      os.path.join(openshift_health_checker_path, 'action_plugins'), +    os.path.join(openshift_health_checker_path, 'callback_plugins'),      os.path.join(openshift_health_checker_path, 'library'),  ] diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py new file mode 100644 index 000000000..0fc258133 --- /dev/null +++ 
b/roles/openshift_health_checker/test/zz_failure_summary_test.py @@ -0,0 +1,70 @@ +from zz_failure_summary import deduplicate_failures + +import pytest + + +@pytest.mark.parametrize('failures,deduplicated', [ +    ( +        [ +            { +                'host': 'master1', +                'msg': 'One or more checks failed', +            }, +        ], +        [ +            { +                'host': ('master1',), +                'msg': 'One or more checks failed', +            }, +        ], +    ), +    ( +        [ +            { +                'host': 'master1', +                'msg': 'One or more checks failed', +            }, +            { +                'host': 'node1', +                'msg': 'One or more checks failed', +            }, +        ], +        [ +            { +                'host': ('master1', 'node1'), +                'msg': 'One or more checks failed', +            }, +        ], +    ), +    ( +        [ +            { +                'host': 'node1', +                'msg': 'One or more checks failed', +                'checks': (('test_check', 'error message'),), +            }, +            { +                'host': 'master2', +                'msg': 'Some error happened', +            }, +            { +                'host': 'master1', +                'msg': 'One or more checks failed', +                'checks': (('test_check', 'error message'),), +            }, +        ], +        [ +            { +                'host': ('master1', 'node1'), +                'msg': 'One or more checks failed', +                'checks': (('test_check', 'error message'),), +            }, +            { +                'host': ('master2',), +                'msg': 'Some error happened', +            }, +        ], +    ), +]) +def test_deduplicate_failures(failures, deduplicated): +    assert deduplicate_failures(failures) == deduplicated diff --git a/roles/openshift_hosted/defaults/main.yml 
b/roles/openshift_hosted/defaults/main.yml index 13cbfb14e..f3747eead 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -1,9 +1,12 @@  --- -r_openshift_hosted_router_firewall_enabled: True -r_openshift_hosted_router_use_firewalld: False +r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" -r_openshift_hosted_registry_firewall_enabled: True -r_openshift_hosted_registry_use_firewalld: False +r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" + +openshift_hosted_router_wait: True +openshift_hosted_registry_wait: True  registry_volume_claim: 'registry-claim' diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 6f012aed1..3e424da12 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -129,34 +129,36 @@      edits: "{{ openshift_hosted_registry_edits }}"      force: "{{ True|bool in openshift_hosted_registry_force }}" -- name: Ensure OpenShift registry correctly rolls out (best-effort today) -  command: | -    oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \ -                      --namespace {{ openshift_hosted_registry_namespace }} \ -                      --config {{ openshift.common.config_base }}/master/admin.kubeconfig -  async: 600 -  poll: 15 -  failed_when: false - -- name: Determine the latest version of the OpenShift registry deployment -  command: | -    {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \ -           --namespace {{ openshift_hosted_registry_namespace }} \ -           --config {{ openshift.common.config_base 
}}/master/admin.kubeconfig \ -           -o jsonpath='{ .status.latestVersion }' -  register: openshift_hosted_registry_latest_version - -- name: Sanity-check that the OpenShift registry rolled out correctly -  command: | -    {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \ -           --namespace {{ openshift_hosted_registry_namespace }} \ -           --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ -           -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' -  register: openshift_hosted_registry_rc_phase -  until: "'Running' not in openshift_hosted_registry_rc_phase.stdout" -  delay: 15 -  retries: 40 -  failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout" +- when: openshift_hosted_registry_wait +  block: +  - name: Ensure OpenShift registry correctly rolls out (best-effort today) +    command: | +      oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \ +                        --namespace {{ openshift_hosted_registry_namespace }} \ +                        --config {{ openshift.common.config_base }}/master/admin.kubeconfig +    async: 600 +    poll: 15 +    failed_when: false + +  - name: Determine the latest version of the OpenShift registry deployment +    command: | +      {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \ +             --namespace {{ openshift_hosted_registry_namespace }} \ +             --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ +             -o jsonpath='{ .status.latestVersion }' +    register: openshift_hosted_registry_latest_version + +  - name: Sanity-check that the OpenShift registry rolled out correctly +    command: | +      {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout 
}} \ +             --namespace {{ openshift_hosted_registry_namespace }} \ +             --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ +             -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' +    register: openshift_hosted_registry_rc_phase +    until: "'Running' not in openshift_hosted_registry_rc_phase.stdout" +    delay: 15 +    retries: 40 +    failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout"  - include: storage/glusterfs.yml    when: diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml index a18e6eea9..a8a6f6fc8 100644 --- a/roles/openshift_hosted/tasks/registry/secure.yml +++ b/roles/openshift_hosted/tasks/registry/secure.yml @@ -37,6 +37,9 @@      hostnames:      - "{{ docker_registry_service.results.clusterip }}"      - "{{ docker_registry_route.results[0].spec.host }}" +    - "{{ openshift_hosted_registry_name }}.default.svc" +    - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}" +    - "{{ openshift_hosted_registry_routehost }}"      cert: "{{ docker_registry_cert_path }}"      key: "{{ docker_registry_key_path }}"      expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool else omit }}" diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 72a1ead80..e57ed733e 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -94,36 +94,38 @@      stats_port: "{{ item.stats_port }}"    with_items: "{{ openshift_hosted_routers }}" -- name: Ensure OpenShift router correctly rolls out (best-effort today) -  command: | -    {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ -                      --namespace {{ item.namespace | default('default') }} \ -  
                    --config {{ openshift.common.config_base }}/master/admin.kubeconfig -  async: 600 -  poll: 15 -  with_items: "{{ openshift_hosted_routers }}" -  failed_when: false +- when: openshift_hosted_router_wait +  block: +  - name: Ensure OpenShift router correctly rolls out (best-effort today) +    command: | +      {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ +                        --namespace {{ item.namespace | default('default') }} \ +                        --config {{ openshift.common.config_base }}/master/admin.kubeconfig +    async: 600 +    poll: 15 +    with_items: "{{ openshift_hosted_routers }}" +    failed_when: false -- name: Determine the latest version of the OpenShift router deployment -  command: | -    {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ -           --namespace {{ item.namespace }} \ -           --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ -           -o jsonpath='{ .status.latestVersion }' -  register: openshift_hosted_routers_latest_version -  with_items: "{{ openshift_hosted_routers }}" +  - name: Determine the latest version of the OpenShift router deployment +    command: | +      {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ +             --namespace {{ item.namespace }} \ +             --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ +             -o jsonpath='{ .status.latestVersion }' +    register: openshift_hosted_routers_latest_version +    with_items: "{{ openshift_hosted_routers }}" -- name: Poll for OpenShift router deployment success -  command: | -    {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ -           --namespace {{ item.0.namespace }} \ -           --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ -           -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase 
}' -  register: openshift_hosted_router_rc_phase -  until: "'Running' not in openshift_hosted_router_rc_phase.stdout" -  delay: 15 -  retries: 40 -  failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout" -  with_together: -  - "{{ openshift_hosted_routers }}" -  - "{{ openshift_hosted_routers_latest_version.results }}" +  - name: Poll for OpenShift router deployment success +    command: | +      {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ +             --namespace {{ item.0.namespace }} \ +             --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ +             -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' +    register: openshift_hosted_router_rc_phase +    until: "'Running' not in openshift_hosted_router_rc_phase.stdout" +    delay: 15 +    retries: 40 +    failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout" +    with_together: +    - "{{ openshift_hosted_routers }}" +    - "{{ openshift_hosted_routers_latest_version.results }}" diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index 3f6409233..41a2b12a2 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -1,6 +1,6 @@  --- -r_openshift_loadbalancer_firewall_enabled: True -r_openshift_loadbalancer_use_firewalld: False +r_openshift_loadbalancer_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_loadbalancer_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  haproxy_frontends:  - name: main diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 84ead3548..70aef02cd 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -22,7 +22,19 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log  ###Optional vars:  - 
`openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'. +- `openshift_logging_curator_image_prefix`: Setting the image prefix for Curator image. Defaults to `openshift_logging_image_prefix`. +- `openshift_logging_elasticsearch_image_prefix`: Setting the image prefix for Elasticsearch image. Defaults to `openshift_logging_image_prefix`. +- `openshift_logging_fluentd_image_prefix`: Setting the image prefix for Fluentd image. Defaults to `openshift_logging_image_prefix`. +- `openshift_logging_kibana_image_prefix`: Setting the image prefix for Kibana image. Defaults to `openshift_logging_image_prefix`. +- `openshift_logging_kibana_proxy_image_prefix`: Setting the image prefix for Kibana proxy image. Defaults to `openshift_logging_image_prefix`. +- `openshift_logging_mux_image_prefix`: Setting the image prefix for Mux image. Defaults to `openshift_logging_image_prefix`.  - `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'. +- `openshift_logging_curator_image_version`: Setting the image version for Curator image. Defaults to `openshift_logging_image_version`. +- `openshift_logging_elasticsearch_image_version`: Setting the image version for Elasticsearch image. Defaults to `openshift_logging_image_version`. +- `openshift_logging_fluentd_image_version`: Setting the image version for Fluentd image. Defaults to `openshift_logging_image_version`. +- `openshift_logging_kibana_image_version`: Setting the image version for Kibana image. Defaults to `openshift_logging_image_version`. +- `openshift_logging_kibana_proxy_image_version`: Setting the image version for Kibana proxy image. Defaults to `openshift_logging_image_version`. +- `openshift_logging_mux_image_version`: Setting the image version for Mux image. Defaults to `openshift_logging_image_version`.  - `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. 
Defaults to 'False'.  - `openshift_logging_master_url`: The URL for the Kubernetes master, this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.{{openshift.common.dns_domain}}'.  - `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://{{openshift.common.public_hostname}}:{{openshift.master.api_port}}'. diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 8b0f4cb62..f07d7e6da 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -84,7 +84,7 @@ openshift_logging_es_ca: /etc/fluent/keys/ca  openshift_logging_es_client_cert: /etc/fluent/keys/cert  openshift_logging_es_client_key: /etc/fluent/keys/key  openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" -openshift_logging_es_cpu_limit: null +openshift_logging_es_cpu_limit: 1000m  # the logging appenders for the root loggers to write ES logs. 
Valid values: 'file', 'console'  openshift_logging_es_log_appenders: ['file']  openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}" @@ -125,7 +125,7 @@ openshift_logging_es_ops_ca: /etc/fluent/keys/ca  openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert  openshift_logging_es_ops_client_key: /etc/fluent/keys/key  openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" -openshift_logging_es_ops_cpu_limit: null +openshift_logging_es_ops_cpu_limit: 1000m  openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"  openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}"  openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml new file mode 100644 index 000000000..d4b33616a --- /dev/null +++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml @@ -0,0 +1,17 @@ +--- +- oc_obj: +    state: list +    kind: project +    name: "{{ item }}" +  with_items: "{{ __default_logging_ops_projects }}" +  register: __logging_ops_projects + +- name: Annotate Operations Projects +  oc_edit: +    kind: ns +    name: "{{ item.item }}" +    separator: '#' +    content: +      metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}" +  with_items: "{{ __logging_ops_projects.results }}" +  when: "{{ item.results.stderr is not defined }}" diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 464e8594f..a77df9986 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -132,6 +132,8 @@      
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"      openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"      openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" +    openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" +    openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"      openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"      openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"      openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" @@ -161,6 +163,8 @@      openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"      openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"      openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" +    openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" +    openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"      openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"      openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"      openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" @@ -181,8 +185,6 @@      openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"      openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"      openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" -    openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}"      openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}"      
openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}" @@ -197,8 +199,6 @@      openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"      openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"      openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" -    openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"      openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"      openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}" @@ -216,6 +216,7 @@    when:    - openshift_logging_use_ops | bool +- include: annotate_ops_projects.yaml  ## Curator  - include_role: @@ -226,8 +227,6 @@      openshift_logging_curator_es_host: "{{ openshift_logging_es_host }}"      openshift_logging_curator_es_port: "{{ openshift_logging_es_port }}"      openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" -    openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"  - include_role: @@ -239,8 +238,6 @@      openshift_logging_curator_es_port: "{{ openshift_logging_es_ops_port }}"      openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"      openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" -    openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"      openshift_logging_curator_cpu_limit: "{{ 
openshift_logging_curator_ops_cpu_limit }}"      openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}" @@ -256,8 +253,6 @@      openshift_logging_mux_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"      openshift_logging_mux_namespace: "{{ openshift_logging_namespace }}"      openshift_logging_mux_master_url: "{{ openshift_logging_master_url }}" -    openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_mux_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_mux_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"    when:    - openshift_logging_use_mux | bool @@ -269,8 +264,6 @@    vars:      generated_certs_dir: "{{openshift.common.config_base}}/logging"      openshift_logging_fluentd_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}" -    openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix }}" -    openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version }}"      openshift_logging_fluentd_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"      openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}"      openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}" diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml index e561b41e2..01809fddf 100644 --- a/roles/openshift_logging/vars/main.yaml +++ b/roles/openshift_logging/vars/main.yaml @@ -6,3 +6,5 @@ es_ops_node_quorum: "{{ (openshift_logging_es_ops_cluster_size | int/2 | round(0  es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size | int}}"  es_log_appenders: ['file', 'console'] + +__default_logging_ops_projects: ['default', 'openshift', 'openshift-infra', 'kube-system'] diff --git a/roles/openshift_logging_curator/defaults/main.yml 
b/roles/openshift_logging_curator/defaults/main.yml index 82ffb2f93..17807b644 100644 --- a/roles/openshift_logging_curator/defaults/main.yml +++ b/roles/openshift_logging_curator/defaults/main.yml @@ -1,7 +1,7 @@  ---  ### General logging settings -openshift_logging_curator_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -openshift_logging_curator_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_curator_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_curator_master_url: "https://kubernetes.default.svc.cluster.local" diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index 3113fb3c9..6e8fab2b5 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -86,7 +86,7 @@      component: "{{ curator_component }}"      logging_component: curator      deploy_name: "{{ curator_name }}" -    image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" +    image: "{{openshift_logging_curator_image_prefix}}logging-curator:{{openshift_logging_curator_image_version}}"      es_host: "{{ openshift_logging_curator_es_host }}"      es_port: "{{ openshift_logging_curator_es_port }}"      curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}" diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml index 0690bf114..75bd479be 100644 --- a/roles/openshift_logging_elasticsearch/defaults/main.yml +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -1,7 +1,7 @@  
---  ### Common settings -openshift_logging_elasticsearch_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -openshift_logging_elasticsearch_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_elasticsearch_namespace: logging diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 8380f8f1f..1e800b1d6 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -277,7 +277,7 @@      component: "{{ es_component }}"      logging_component: elasticsearch      deploy_name: "{{ es_deploy_name }}" -    image: "{{ openshift_logging_image_prefix }}logging-elasticsearch:{{ openshift_logging_image_version }}" +    image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"      es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"      es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"      es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index a53bbd2df..30d3d854a 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -1,7 +1,7 @@  ---  ### General logging settings -openshift_logging_fluentd_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | 
default('docker.io/openshift/origin-') }}" -openshift_logging_fluentd_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_fluentd_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"  openshift_logging_fluentd_namespace: logging diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index 39dffba19..88e039e3f 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -28,7 +28,7 @@ spec:          {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}"        containers:        - name: "{{ daemonset_container_name }}" -        image: "{{ openshift_logging_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_image_version }}" +        image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}"          imagePullPolicy: Always          securityContext:            privileged: true diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml index 14787a62b..ee265bb14 100644 --- a/roles/openshift_logging_kibana/defaults/main.yml +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -2,8 +2,8 @@  ### Common settings  openshift_logging_kibana_master_url: "https://kubernetes.default.svc.cluster.local"  openshift_logging_kibana_master_public_url: "https://kubernetes.default.svc.cluster.local" -openshift_logging_kibana_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | 
default('docker.io/openshift/origin-') }}" -openshift_logging_kibana_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_kibana_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_kibana_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_kibana_namespace: logging @@ -24,6 +24,8 @@ openshift_logging_kibana_edge_term_policy: Redirect  openshift_logging_kibana_ops_deployment: false  # Proxy settings +openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_kibana_proxy_debug: false  openshift_logging_kibana_proxy_cpu_limit: null  openshift_logging_kibana_proxy_memory_limit: 256Mi diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 166f102f7..e17e8c1f2 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -225,8 +225,8 @@      component: "{{ kibana_component }}"      logging_component: kibana      deploy_name: "{{ kibana_name }}" -    image: "{{ openshift_logging_image_prefix }}logging-kibana:{{ openshift_logging_image_version }}" -    proxy_image: "{{ openshift_logging_image_prefix }}logging-auth-proxy:{{ openshift_logging_image_version }}" +    image: "{{ openshift_logging_kibana_image_prefix }}logging-kibana:{{ openshift_logging_kibana_image_version }}" +    proxy_image: "{{ openshift_logging_kibana_proxy_image_prefix }}logging-auth-proxy:{{ openshift_logging_kibana_proxy_image_version }}"      es_host: "{{ openshift_logging_kibana_es_host }}"      es_port: "{{ 
openshift_logging_kibana_es_port }}"      kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}" diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index 7a3da9b4c..68412aec8 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -1,7 +1,7 @@  ---  ### General logging settings -openshift_logging_mux_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -openshift_logging_mux_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +openshift_logging_mux_image_version: "{{ openshift_logging_image_version | default('latest') }}"  openshift_logging_mux_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"  openshift_logging_mux_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"  openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 8ec93de7d..2ec863afa 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -165,7 +165,7 @@      component: mux      logging_component: mux      deploy_name: "logging-{{ component }}" -    image: "{{ openshift_logging_image_prefix }}logging-fluentd:{{ openshift_logging_image_version }}" +    image: "{{ openshift_logging_mux_image_prefix }}logging-fluentd:{{ openshift_logging_mux_image_version }}"      es_host: "{{ openshift_logging_mux_app_host }}"      es_port: "{{ openshift_logging_mux_app_port }}"      ops_host: "{{ openshift_logging_mux_ops_host }}" 
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index a4c178908..d70106276 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -1,6 +1,6 @@  --- -r_openshift_master_firewall_enabled: True -r_openshift_master_use_firewalld: False +r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  openshift_node_ips: []  r_openshift_master_clean_install: false @@ -19,3 +19,8 @@ r_openshift_master_os_firewall_allow:  - service: etcd embedded    port: 4001/tcp    cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + +oreg_url: '' +oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" +oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker" +oreg_auth_credentials_replace: False diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml new file mode 100644 index 000000000..0013f5289 --- /dev/null +++ b/roles/openshift_master/tasks/bootstrap.yml @@ -0,0 +1,28 @@ +--- + +- name: ensure the node-bootstrap service account exists +  oc_serviceaccount: +    name: node-bootstrapper +    namespace: openshift-infra +    state: present +  run_once: true + +- name: grant node-bootstrapper the correct permissions to bootstrap +  oc_adm_policy_user: +    namespace: openshift-infra +    user: system:serviceaccount:openshift-infra:node-bootstrapper +    resource_kind: cluster-role +    resource_name: system:node-bootstrapper +    state: present +  run_once: true + +# TODO: create a module for this command. 
+# oc_serviceaccounts_kubeconfig +- name: create service account kubeconfig with csr rights +  command: "oc serviceaccounts create-kubeconfig node-bootstrapper -n openshift-infra" +  register: kubeconfig_out + +- name: put service account kubeconfig into a file on disk for bootstrap +  copy: +    content: "{{ kubeconfig_out.stdout }}" +    dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig" diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index a11471891..ba56ac94e 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -218,6 +218,36 @@    - restart master api    - restart master controllers +- name: modify controller args +  yedit: +    src: /etc/origin/master/master-config.yaml +    edits: +    - key: kubernetesMasterConfig.controllerArguments.cluster-signing-cert-file +      value: +      - /etc/origin/master/ca.crt +    - key: kubernetesMasterConfig.controllerArguments.cluster-signing-key-file +      value: +      - /etc/origin/master/ca.key +  notify: +  - restart master controllers +  when: openshift_master_bootstrap_enabled | default(False) + +- name: Check for credentials file for registry auth +  stat: +    path: "{{oreg_auth_credentials_path }}" +  when: +  - oreg_auth_user is defined +  register: master_oreg_auth_credentials_stat + +- name: Create credentials for registry auth +  command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" +  when: +  - oreg_auth_user is defined +  - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool +  notify: +  - restart master api +  - restart master controllers +  - include: set_loopback_context.yml    when:    - openshift.common.version_gte_3_2_or_1_2 @@ -366,3 +396,7 @@    shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster    when:    - l_install_result | changed + +- 
name: node bootstrap settings +  include: bootstrap.yml +  when: openshift_master_bootstrap_enabled | default(False) diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 973b3a619..cc000496a 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,6 +1,6 @@  --- -r_openshift_node_firewall_enabled: True -r_openshift_node_use_firewalld: False +r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  r_openshift_node_os_firewall_deny: []  r_openshift_node_os_firewall_allow:  - service: Kubernetes kubelet @@ -21,3 +21,8 @@ r_openshift_node_os_firewall_allow:  - service: Kubernetes service NodePort UDP    port: "{{ openshift_node_port_range | default('') }}/udp"    cond: "{{ openshift_node_port_range is defined }}" + +oreg_url: '' +oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}" +oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker" +oreg_auth_credentials_replace: False diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 3353a22e3..525dd1d1a 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -4,7 +4,7 @@      msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."    
when:      - (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise'] -    - not openshift_docker_use_crio | default(false) +    - not openshift_use_crio | default(false)  - name: setup firewall    include: firewall.yml @@ -70,25 +70,15 @@      - openshift_disable_swap | default(true) | bool  # End Disable Swap Block -# We have to add tuned-profiles in the same transaction otherwise we run into depsolving -# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.  - name: Install Node package    package: -    name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" +    name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"      state: present    when: not openshift.common.is_containerized | bool -- name: Check for tuned package -  command: rpm -q tuned -  args: -    warn: no -  register: tuned_installed -  changed_when: false -  failed_when: false - -- name: Set atomic-guest tuned profile -  command: "tuned-adm profile atomic-guest" -  when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool +- name: setup tuned +  include: tuned.yml +  static: yes  - name: Install sdn-ovs package    package: @@ -103,7 +93,7 @@      name: cri-o      enabled: yes      state: restarted -  when: openshift_docker_use_crio | default(false) +  when: openshift_use_crio | default(false)  - name: Install conntrack-tools package    package: @@ -160,6 +150,21 @@    notify:      - restart node +- name: Check for credentials file for registry auth +  stat: +    path: "{{oreg_auth_credentials_path }}" +  when: +    - oreg_auth_user is defined +  
register: node_oreg_auth_credentials_stat + +- name: Create credentials for registry auth +  command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" +  when: +    - oreg_auth_user is defined +    - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool +  notify: +    - restart node +  - name: Configure AWS Cloud Provider Settings    lineinfile:      dest: /etc/sysconfig/{{ openshift.common.service_type }}-node diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index dc1df9185..e09063aa5 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -1,6 +1,6 @@  ---  - set_fact: -    l_use_crio: "{{ openshift_docker_use_crio | default(false) }}" +    l_use_crio: "{{ openshift_use_crio | default(false) }}"  - set_fact:      l_service_name: "cri-o" diff --git a/roles/openshift_node/tasks/tuned.yml b/roles/openshift_node/tasks/tuned.yml new file mode 100644 index 000000000..425bf6a26 --- /dev/null +++ b/roles/openshift_node/tasks/tuned.yml @@ -0,0 +1,41 @@ +--- +- name: Check for tuned package +  command: rpm -q tuned +  args: +    warn: no +  register: tuned_installed +  changed_when: false +  failed_when: false + +- name: Tuned service setup +  block: +  - name: Set tuned OpenShift variables +    set_fact: +      openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}" +      tuned_etc_directory: '/etc/tuned' +      tuned_templates_source: '../templates/tuned' + +  - name: Ensure directory structure exists +    file: +      state: directory +      dest: '{{ tuned_etc_directory }}/{{ item.path }}' +    with_filetree: '{{ tuned_templates_source }}' +    when: item.state == 'directory' + +  - name: Ensure files are populated from templates +    
template: +      src: '{{ item.src }}' +      dest: '{{ tuned_etc_directory }}/{{ item.path }}' +    with_filetree: '{{ tuned_templates_source }}' +    when: item.state == 'file' + +  - name: Make tuned use the recommended tuned profile on restart +    file: path=/etc/tuned/active_profile state=absent + +  - name: Restart tuned service +    systemd: +      state: restarted +      daemon_reload: yes +      name: tuned + +  when: tuned_installed.rc == 0 | bool diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index 3d0ae3bbd..0856737f6 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -8,7 +8,7 @@ Wants={{ openshift.docker.service_name }}.service  Documentation=https://github.com/openshift/origin  Requires=dnsmasq.service  After=dnsmasq.service -{% if openshift.docker.use_crio %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}  [Service]  Type=notify diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 93f8658b4..711afcadb 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -16,7 +16,7 @@ imageConfig:    latest: false  kind: NodeConfig  kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }} -{% if openshift.docker.use_crio | default(False) %} +{% if openshift_use_crio | default(False) %}    container-runtime:    - remote    container-runtime-endpoint: diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index c4580be1f..8734e7443 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -3,7 +3,7 @@ Requires={{ openshift.docker.service_name 
}}.service  After={{ openshift.docker.service_name }}.service  PartOf={{ openshift.common.service_type }}-node.service  Before={{ openshift.common.service_type }}-node.service -{% if openshift.docker.use_crio %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}  [Service]  ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" diff --git a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf b/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf new file mode 100644 index 000000000..f22f21065 --- /dev/null +++ b/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf @@ -0,0 +1,25 @@ +# +# tuned configuration +# + +[main] +summary=Optimize systems running OpenShift control plane +include=openshift + +[sysctl] +# ktune sysctl settings, maximizing i/o throughput +# +# Minimal preemption granularity for CPU-bound tasks: +# (default: 1 msec#  (1 + ilog(ncpus)), units: nanoseconds) +kernel.sched_min_granularity_ns=10000000 + +# The total time the scheduler will consider a migrated process +# "cache hot" and thus less likely to be re-migrated +# (system default is 500000, i.e. 0.5 ms) +kernel.sched_migration_cost_ns=5000000 + +# SCHED_OTHER wake-up granularity. +# +# Preemption granularity when tasks wake up.  Lower the value to improve  +# wake-up latency and throughput for latency critical tasks. 
+kernel.sched_wakeup_granularity_ns = 4000000 diff --git a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf b/roles/openshift_node/templates/tuned/openshift-node/tuned.conf new file mode 100644 index 000000000..78c7d19c9 --- /dev/null +++ b/roles/openshift_node/templates/tuned/openshift-node/tuned.conf @@ -0,0 +1,10 @@ +# +# tuned configuration +# + +[main] +summary=Optimize systems running OpenShift nodes +include=openshift + +[sysctl] +net.ipv4.tcp_fastopen=3 diff --git a/roles/openshift_node/templates/tuned/openshift/tuned.conf b/roles/openshift_node/templates/tuned/openshift/tuned.conf new file mode 100644 index 000000000..68ac5dadb --- /dev/null +++ b/roles/openshift_node/templates/tuned/openshift/tuned.conf @@ -0,0 +1,24 @@ +# +# tuned configuration +# + +[main] +summary=Optimize systems running OpenShift (parent profile) +include=${f:virt_check:{{ openshift_tuned_guest_profile }}:throughput-performance} + +[selinux] +avc_cache_threshold=65536 + +[net] +nf_conntrack_hashsize=131072 + +[sysctl] +kernel.pid_max=131072 +net.netfilter.nf_conntrack_max=1048576 +fs.inotify.max_user_watches=65536 +net.ipv4.neigh.default.gc_thresh1=8192 +net.ipv4.neigh.default.gc_thresh2=32768 +net.ipv4.neigh.default.gc_thresh3=65536 +net.ipv6.neigh.default.gc_thresh1=8192 +net.ipv6.neigh.default.gc_thresh2=32768 +net.ipv6.neigh.default.gc_thresh3=65536 diff --git a/roles/openshift_node/templates/tuned/recommend.conf b/roles/openshift_node/templates/tuned/recommend.conf new file mode 100644 index 000000000..5fa765798 --- /dev/null +++ b/roles/openshift_node/templates/tuned/recommend.conf @@ -0,0 +1,8 @@ +[openshift-node] +/etc/origin/node/node-config.yaml=.*region=primary + +[openshift-control-plane,master] +/etc/origin/master/master-config.yaml=.* + +[openshift-control-plane,node] +/etc/origin/node/node-config.yaml=.*region=infra diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh 
b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh index 4aab8f2e9..61d2a5b51 100755 --- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh +++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh @@ -46,9 +46,7 @@ if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then    def_route=$(/sbin/ip route list match 0.0.0.0/0 | awk '{print $3 }')    def_route_int=$(/sbin/ip route get to ${def_route} | awk '{print $3}')    def_route_ip=$(/sbin/ip route get to ${def_route} | awk '{print $5}') -  if [[ ${DEVICE_IFACE} == ${def_route_int} && \ -       -n "${IP4_NAMESERVERS}" && \ -       "${IP4_NAMESERVERS}" != "${def_route_ip}" ]]; then +  if [[ ${DEVICE_IFACE} == ${def_route_int} ]]; then      if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then        cat << EOF > /etc/dnsmasq.d/origin-dns.conf  no-resolv @@ -61,35 +59,40 @@ EOF        NEEDS_RESTART=1      fi -    ###################################################################### -    # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf -    # and /etc/origin/node/resolv.conf in their respective formats -    for ns in ${IP4_NAMESERVERS}; do -      if [[ ! -z $ns ]]; then -        echo "server=${ns}" >> $UPSTREAM_DNS_TMP -        echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF +    # If network manager doesn't know about the nameservers then the best +    # we can do is grab them from /etc/resolv.conf but only if we've got no +    # watermark +    if ! 
grep -q '99-origin-dns.sh' /etc/resolv.conf; then +      if [[ -z "${IP4_NAMESERVERS}" || "${IP4_NAMESERVERS}" == "${def_route_ip}" ]]; then +            IP4_NAMESERVERS=`grep '^nameserver ' /etc/resolv.conf | awk '{ print $2 }'` +      fi +      ###################################################################### +      # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf +      # and /etc/origin/node/resolv.conf in their respective formats +      for ns in ${IP4_NAMESERVERS}; do +        if [[ ! -z $ns ]]; then +          echo "server=${ns}" >> $UPSTREAM_DNS_TMP +          echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF +        fi +      done +      # Sort it in case DNS servers arrived in a different order +      sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED +      sort $UPSTREAM_DNS > $CURRENT_UPSTREAM_DNS_SORTED +      # Compare to the current config file (sorted) +      NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'` +      CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'` +      if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then +        # DNS has changed, copy the temp file to the proper location (-Z +        # sets default selinux context) and set the restart flag +        cp -Z $UPSTREAM_DNS_TMP $UPSTREAM_DNS +        NEEDS_RESTART=1 +      fi +      # compare /etc/origin/node/resolv.conf checksum and replace it if different +      NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF}` +      OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf` +      if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then +        cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf        fi -    done - -    # Sort it in case DNS servers arrived in a different order -    sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED -    sort $UPSTREAM_DNS > $CURRENT_UPSTREAM_DNS_SORTED - -    # Compare to the current config file (sorted) -    NEW_DNS_SUM=`md5sum 
${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'` -    CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'` -    if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then -      # DNS has changed, copy the temp file to the proper location (-Z -      # sets default selinux context) and set the restart flag -      cp -Z $UPSTREAM_DNS_TMP $UPSTREAM_DNS -      NEEDS_RESTART=1 -    fi - -    # compare /etc/origin/node/resolv.conf checksum and replace it if different -    NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF}` -    OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf` -    if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then -      cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf      fi      if ! `systemctl -q is-active dnsmasq.service`; then diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index d3de2165a..a059745a6 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -55,7 +55,7 @@ defined:  | Name              | Default value | Description                             |  |-------------------|---------------|-----------------------------------------| -| glusterfs_devices | None          | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]' +| glusterfs_devices | None          | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]' **NOTE:** You MUST set this as a host variable on each node host. For some reason, if you set this as a group variable it gets interpreted as a string rather than an array. 
See https://github.com/openshift/openshift-ansible/issues/5071  In addition, each host may specify the following variables to further control  their configuration as GlusterFS nodes: diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index a5887465e..8d21a3f27 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -52,8 +52,8 @@ openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glus  openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"  openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"  openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile | default(omit) }}" -r_openshift_master_firewall_enabled: True -r_openshift_master_use_firewalld: False +r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  r_openshift_storage_glusterfs_os_firewall_deny: []  r_openshift_storage_glusterfs_os_firewall_allow:  - service: glusterfs_sshd diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml index 4a2bc6141..e7e0b331b 100644 --- a/roles/openshift_storage_nfs/defaults/main.yml +++ b/roles/openshift_storage_nfs/defaults/main.yml @@ -1,6 +1,6 @@  --- -r_openshift_storage_nfs_firewall_enabled: True -r_openshift_storage_nfs_use_firewalld: False +r_openshift_storage_nfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_openshift_storage_nfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"  r_openshift_storage_nfs_os_firewall_deny: []  r_openshift_storage_nfs_os_firewall_allow: diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml 
index a6b8a40c8..c0ea00f34 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -60,13 +60,16 @@    # It also allows for optional trailing data which:    # - must start with a dash    # - may contain numbers +  # - may containe dots (https://github.com/openshift/openshift-ansible/issues/5192) +  #    - name: (Enterprise) Verify openshift_image_tag is valid      when: openshift.common.deployment_type == 'openshift-enterprise'      assert:        that: -      - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}" +      - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"        msg: |- -        openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4 +        openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, +        v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6          You specified openshift_image_tag={{ openshift_image_tag }}  # Make sure we copy this to a fact if given a var: diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml index 4d9f72f01..a2a579e9d 100644 --- a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/set_version_containerized.yml @@ -1,6 +1,6 @@  ---  - set_fact: -    l_use_crio: "{{ openshift_docker_use_crio | default(false) }}" +    l_use_crio: "{{ openshift_use_crio | default(false) }}"  - name: Set containerized version to configure if openshift_image_tag specified    set_fact: diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml index f96a80f1c..2cae94411 100644 --- a/roles/os_firewall/defaults/main.yml +++ b/roles/os_firewall/defaults/main.yml @@ -2,4 +2,4 @@  os_firewall_enabled: True  # firewalld is not supported on Atomic Host  # https://bugzilla.redhat.com/show_bug.cgi?id=1403331 
-os_firewall_use_firewalld: "{{ False }}" +os_firewall_use_firewalld: False  | 
