46 files changed, 722 insertions, 572 deletions
diff --git a/.gitignore b/.gitignore index 48507c5d1..ac249d5eb 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,7 @@ multi_inventory.yaml ansible.cfg *.retry .vscode/* +.cache +.tox +.coverage +*.egg-info diff --git a/.travis.yml b/.travis.yml index b5b7a2a59..0e3a75df7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,9 +1,13 @@ --- sudo: false +cache: + - pip + language: python python: - "2.7" + - "3.5" install: - pip install -r requirements.txt diff --git a/callback_plugins/openshift_quick_installer.py b/callback_plugins/openshift_quick_installer.py index fc9bfb899..f5c4c71a4 100644 --- a/callback_plugins/openshift_quick_installer.py +++ b/callback_plugins/openshift_quick_installer.py @@ -36,30 +36,13 @@ What's different: """ from __future__ import (absolute_import, print_function) -import imp -import os import sys from ansible import constants as C +from ansible.plugins.callback import CallbackBase from ansible.utils.color import colorize, hostcolor -ANSIBLE_PATH = imp.find_module('ansible')[1] -DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py') -DEFAULT_MODULE = imp.load_source( - 'ansible.plugins.callback.default', - DEFAULT_PATH -) -try: - from ansible.plugins.callback import CallbackBase - BASECLASS = CallbackBase -except ImportError: # < ansible 2.1 - BASECLASS = DEFAULT_MODULE.CallbackModule - -reload(sys) -sys.setdefaultencoding('utf-8') - - -class CallbackModule(DEFAULT_MODULE.CallbackModule): +class CallbackModule(CallbackBase): """ Ansible callback plugin diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 8fe85d8e2..bad1f6a3b 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -19,6 +19,7 @@ from distutils.version import LooseVersion from operator import itemgetter from ansible.parsing.yaml.dumper import AnsibleDumper from urlparse import urlparse +from six import string_types HAS_OPENSSL = False try: @@ -120,7 +121,7 @@ class FilterModule(object): raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object") if not isinstance(variables, dict): raise errors.AnsibleFilterError("|failed expects variables is a dictionary") - if not isinstance(inventory_hostname, basestring): + if not isinstance(inventory_hostname, string_types): raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string") # pylint: disable=no-member ansible_version = pkg_resources.get_distribution("ansible").version @@ -215,7 +216,7 @@ class FilterModule(object): """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") - if not all(isinstance(x, basestring) for x in data): + if not all(isinstance(x, string_types) for x in data): raise errors.AnsibleFilterError("|failed expects first param is a list" " of strings") retval = [prepend + s for s in data] @@ -362,7 +363,7 @@ class FilterModule(object): if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a list") - if not isinstance(filter_attr, basestring): + if not isinstance(filter_attr, string_types): raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode") # Gather up the values for the list of keys passed in @@ -401,9 +402,9 @@ class FilterModule(object): """ if not isinstance(nodes, list): raise errors.AnsibleFilterError("failed expects to filter on a list") - if not isinstance(label, basestring): + if not isinstance(label, string_types): raise errors.AnsibleFilterError("failed expects label to be a string") - if value is 
not None and not isinstance(value, basestring): + if value is not None and not isinstance(value, string_types): raise errors.AnsibleFilterError("failed expects value to be a string") def label_filter(node): @@ -419,7 +420,7 @@ class FilterModule(object): else: return False - if isinstance(labels, basestring): + if isinstance(labels, string_types): labels = yaml.safe_load(labels) if not isinstance(labels, dict): raise errors.AnsibleFilterError( @@ -518,7 +519,7 @@ class FilterModule(object): "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt", "names": [ "some-hostname.com" ] }] """ - if not isinstance(named_certs_dir, basestring): + if not isinstance(named_certs_dir, string_types): raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode") if not isinstance(internal_hostnames, list): @@ -545,7 +546,7 @@ class FilterModule(object): if cert.get_extension(i).get_short_name() == 'subjectAltName': for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '): certificate['names'].append(name) - except: + except Exception: raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] + "please specify certificate names in host inventory")) @@ -670,7 +671,7 @@ class FilterModule(object): migrations = {'openshift_router_selector': 'openshift_hosted_router_selector', 'openshift_registry_selector': 'openshift_hosted_registry_selector'} - for old_fact, new_fact in migrations.iteritems(): + for old_fact, new_fact in migrations.items(): if old_fact in facts and new_fact not in facts: facts[new_fact] = facts[old_fact] return facts @@ -781,7 +782,7 @@ class FilterModule(object): """ if not isinstance(rpms, list): raise errors.AnsibleFilterError("failed expects to filter on a list") - if openshift_version is not None and not isinstance(openshift_version, basestring): + if openshift_version is not None and not isinstance(openshift_version, string_types): raise errors.AnsibleFilterError("failed expects openshift_version to be a string") rpms_31 = [] @@ -800,9 +801,9 @@ class FilterModule(object): """ if not isinstance(pods, list): raise errors.AnsibleFilterError("failed expects to filter on a list") - if not isinstance(deployment_type, basestring): + if not isinstance(deployment_type, string_types): raise errors.AnsibleFilterError("failed expects deployment_type to be a string") - if not isinstance(component, basestring): + if not isinstance(component, string_types): raise errors.AnsibleFilterError("failed expects component to be a string") image_prefix = 'openshift/origin-' @@ -843,7 +844,7 @@ class FilterModule(object): Ex. 
v3.2.0.10 -> -3.2.0.10 v1.2.0-rc1 -> -1.2.0 """ - if not isinstance(version, basestring): + if not isinstance(version, string_types): raise errors.AnsibleFilterError("|failed expects a string or unicode") if version.startswith("v"): version = version[1:] @@ -861,7 +862,7 @@ class FilterModule(object): Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com """ - if not isinstance(url, basestring): + if not isinstance(url, string_types): raise errors.AnsibleFilterError("|failed expects a string or unicode") parse_result = urlparse(url) if parse_result.netloc != '': diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py deleted file mode 100644 index 1c1854b29..000000000 --- a/filter_plugins/oo_zabbix_filters.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 -''' -Custom zabbix filters for use in openshift-ansible -''' - -import pdb - - -class FilterModule(object): - ''' Custom zabbix ansible filters ''' - - @staticmethod - def create_data(data, results, key, new_key): - '''Take a dict, filter through results and add results['key'] to dict - ''' - new_list = [app[key] for app in results] - data[new_key] = new_list - return data - - @staticmethod - def oo_set_zbx_trigger_triggerid(item, trigger_results): - '''Set zabbix trigger id from trigger results - ''' - if isinstance(trigger_results, list): - item['triggerid'] = trigger_results[0]['triggerid'] - return item - - item['triggerid'] = trigger_results['triggerids'][0] - return item - - @staticmethod - def oo_set_zbx_item_hostid(item, template_results): - ''' Set zabbix host id from template results - ''' - if isinstance(template_results, list): - item['hostid'] = template_results[0]['templateid'] - return item - - item['hostid'] = template_results['templateids'][0] - return item - - @staticmethod - def oo_pdb(arg): - ''' This pops you into a pdb instance where arg is the data passed in - from the filter. 
- Ex: "{{ hostvars | oo_pdb }}" - ''' - pdb.set_trace() - return arg - - @staticmethod - def select_by_name(ans_data, data): - ''' test - ''' - for zabbix_item in data: - if ans_data['name'] == zabbix_item: - data[zabbix_item]['params']['hostid'] = ans_data['templateid'] - return data[zabbix_item]['params'] - return None - - @staticmethod - def oo_build_zabbix_collect(data, string, value): - ''' Build a list of dicts from a list of data matched on string attribute - ''' - rval = [] - for item in data: - if item[string] == value: - rval.append(item) - - return rval - - @staticmethod - def oo_build_zabbix_list_dict(values, string): - ''' Build a list of dicts with string as key for each value - ''' - rval = [] - for value in values: - rval.append({string: value}) - return rval - - @staticmethod - def oo_remove_attr_from_list_dict(data, attr): - ''' Remove a specific attribute from a dict - ''' - attrs = [] - if isinstance(attr, str): - attrs.append(attr) - else: - attrs = attr - - for attribute in attrs: - for _entry in data: - _entry.pop(attribute, None) - - return data - - @staticmethod - def itservice_results_builder(data, clusters, keys): - '''Take a list of dict results, - loop through each results and create a hash - of: - [{clusterid: cluster1, key: 111 }] - ''' - r_list = [] - for cluster in clusters: - for results in data: - if cluster == results['item'][0]: - results = results['results'] - if results and len(results) > 0 and all([_key in results[0] for _key in keys]): - tmp = {} - tmp['clusterid'] = cluster - for key in keys: - tmp[key] = results[0][key] - r_list.append(tmp) - - return r_list - - @staticmethod - def itservice_dependency_builder(data, cluster): - '''Take a list of dict results, - loop through each results and create a hash - of: - [{clusterid: cluster1, key: 111 }] - ''' - r_list = [] - for dep in data: - if cluster == dep['clusterid']: - r_list.append({'name': '%s - %s' % (dep['clusterid'], dep['description']), 'dep_type': 'hard'}) - - return r_list - - @staticmethod - def itservice_dep_builder_list(data): - '''Take a list of dict results, - loop through each results and create a hash - of: - [{clusterid: cluster1, key: 111 }] - ''' - r_list = [] - for dep in data: - r_list.append({'name': '%s' % dep, 'dep_type': 'hard'}) - - return r_list - - def filters(self): - ''' returns a mapping of filters to methods ''' - return { - "select_by_name": self.select_by_name, - "oo_set_zbx_item_hostid": self.oo_set_zbx_item_hostid, - "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid, - "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict, - "create_data": self.create_data, - "oo_build_zabbix_collect": self.oo_build_zabbix_collect, - "oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict, - "itservice_results_builder": self.itservice_results_builder, - "itservice_dependency_builder": self.itservice_dependency_builder, - "itservice_dep_builder_list": self.itservice_dep_builder_list, - } diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py index 57b1f7d82..ec09b09f6 100644 --- a/filter_plugins/openshift_master.py +++ b/filter_plugins/openshift_master.py @@ -6,22 +6,14 @@ Custom filters for use in openshift-master ''' import copy import sys -import yaml + +from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error from ansible import errors +from ansible.plugins.filter.core import to_bool as ansible_bool +from six import string_types -# pylint: 
disable=no-name-in-module,import-error,wrong-import-order -from distutils.version import LooseVersion -try: - # ansible-2.1 - from ansible.plugins.filter.core import to_bool as ansible_bool -except ImportError: - try: - # ansible-2.0.x - from ansible.runner.filter_plugins.core import bool as ansible_bool - except ImportError: - # ansible-1.9.x - from ansible.plugins.filter.core import bool as ansible_bool +import yaml class IdentityProviderBase(object): @@ -513,7 +505,7 @@ class FilterModule(object): 'master3.example.com'] returns True ''' - if not issubclass(type(data), basestring): + if not issubclass(type(data), string_types): raise errors.AnsibleFilterError("|failed expects data is a string or unicode") if not issubclass(type(masters), list): raise errors.AnsibleFilterError("|failed expects masters is a list") @@ -558,7 +550,7 @@ class FilterModule(object): def oo_htpasswd_users_from_file(file_contents): ''' return a dictionary of htpasswd users from htpasswd file contents ''' htpasswd_entries = {} - if not isinstance(file_contents, basestring): + if not isinstance(file_contents, string_types): raise errors.AnsibleFilterError("failed, expects to filter on a string") for line in file_contents.splitlines(): user = None diff --git a/git/.pylintrc b/git/.pylintrc index 9c98889b3..411330fe7 100644 --- a/git/.pylintrc +++ b/git/.pylintrc @@ -8,7 +8,7 @@ #init-hook= # Profiled execution. -profile=no +#profile=no # Add files or directories to the blacklist. They should be base names, not # paths. @@ -98,7 +98,7 @@ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / stateme # Add a comment according to your evaluation note. This is used by the global # evaluation report (RP0004). -comment=no +#comment=no # Template used to display messages. This is a python new-style format string # used to format the message information. See doc for all details @@ -249,7 +249,7 @@ ignored-classes=SQLObject # When zope mode is activated, add a predefined set of Zope acquired attributes # to generated-members. -zope=no +#zope=no # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E0201 when accessed. Python regular diff --git a/library/modify_yaml.py b/library/modify_yaml.py index 0805f01ec..d8d22d5ea 100755 --- a/library/modify_yaml.py +++ b/library/modify_yaml.py @@ -95,7 +95,7 @@ def main(): # ignore broad-except error to avoid stack trace to ansible user # pylint: disable=broad-except - except Exception, e: + except Exception as e: return module.fail_json(msg=str(e)) diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py index c19274e06..daff68fbe 100644 --- a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py +++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py @@ -5,22 +5,11 @@ Custom filters for use in openshift-ansible ''' -import pdb - class FilterModule(object): ''' Custom ansible filters ''' @staticmethod - def oo_pdb(arg): - ''' This pops you into a pdb instance where arg is the data passed in - from the filter. 
- Ex: "{{ hostvars | oo_pdb }}" - ''' - pdb.set_trace() - return arg - - @staticmethod def translate_volume_name(volumes, target_volume): ''' This filter matches a device string /dev/sdX to /dev/xvdX diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml deleted file mode 100644 index 318396bcc..000000000 --- a/playbooks/adhoc/noc/create_host.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- name: 'Create a host object in zabbix' - hosts: localhost - connection: local - become: no - gather_facts: no - roles: - - os_zabbix - post_tasks: - - - zbxapi: - server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php - zbx_class: Template - state: list - params: - host: ctr_test_kwoodson - filter: - host: - - ctr_kwoodson_test_tmpl - - register: tmpl_results - - - debug: var=tmpl_results - -#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml -- name: 'Create a host object in zabbix' - hosts: localhost - connection: local - become: no - gather_facts: no - roles: - - os_zabbix - post_tasks: - - - zbxapi: - server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php - zbx_class: Host - state: absent - params: - host: ctr_test_kwoodson - interfaces: - - type: 1 - main: 1 - useip: 1 - ip: 127.0.0.1 - dns: "" - port: 10050 - groups: - - groupid: 1 - templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}" - output: extend - filter: - host: - - ctr_test_kwoodson - - register: host_results - - - debug: var=host_results diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml deleted file mode 100644 index b694aea1b..000000000 --- a/playbooks/adhoc/noc/create_maintenance.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml -- name: 'Create a maintenace object in zabbix' - hosts: localhost - connection: local - become: no - gather_facts: no - roles: - - os_zabbix - vars: - oo_hostids: '' - oo_groupids: '' - post_tasks: - - assert: - that: oo_desc is defined - - - zbxapi: - server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php - zbx_class: Maintenance - state: present - params: - name: "{{ oo_name }}" - description: "{{ oo_desc }}" - active_since: "{{ oo_start }}" - active_till: "{{ oo_stop }}" - maintenance_type: "0" - output: extend - hostids: "{{ oo_hostids.split(',') | default([]) }}" - #groupids: "{{ oo_groupids.split(',') | default([]) }}" - timeperiods: - - start_time: "{{ oo_start }}" - period: "{{ oo_stop }}" - selectTimeperiods: extend - - register: maintenance - - - debug: var=maintenance diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml deleted file mode 100644 index 32fc7ce68..000000000 --- a/playbooks/adhoc/noc/get_zabbix_problems.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- name: 'Get current hosts who have triggers that are alerting by trigger description' - hosts: localhost - connection: local - become: no - gather_facts: no - roles: - - os_zabbix - post_tasks: - - assert: - that: oo_desc is defined - - - zbxapi: - server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php - zbx_class: Trigger - state: list - params: - only_true: true - output: extend - selectHosts: extend - searchWildCardsEnabled: 1 - search: - description: "{{ oo_desc }}" - 
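
Nearly every Python hunk in this diff (oo_filters.py, openshift_master.py, modify_yaml.py, and more below) applies the same py2/py3 compatibility recipe: `six.string_types` for `basestring`, `six.text_type` for `unicode`, `dict.items()` for `iteritems()`, `except E as e` for the py2-only `except E, e`, and `print()` as a function. A minimal sketch of the whole recipe in one place (the function bodies are illustrative; the migration dict mirrors the oo_filters.py hunk):

```python
from __future__ import print_function

from six import string_types, text_type


def check_host(inventory_hostname):
    # py3 has no basestring; six.string_types is basestring on py2, str on py3
    if not isinstance(inventory_hostname, string_types):
        raise TypeError('expected a string, got %r' % type(inventory_hostname))
    return inventory_hostname


def migrate_facts(facts, migrations):
    # iteritems() is py2-only; items() works on both
    for old_fact, new_fact in migrations.items():
        if old_fact in facts and new_fact not in facts:
            facts[new_fact] = facts[old_fact]
    return facts


print(migrate_facts({'openshift_router_selector': 'region=infra'},
                    {'openshift_router_selector': 'openshift_hosted_router_selector'}))

try:
    check_host(42)
except Exception as e:  # "except Exception, e" is a SyntaxError on py3
    print(text_type(e))  # text_type is unicode on py2, str on py3
```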
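The openshift_quick_installer.py hunk earlier in this diff drops the `imp`-based loading of Ansible's stock default callback (plus the py2-only `reload(sys)` / `setdefaultencoding` hack) and subclasses `CallbackBase` directly, which is safe once ansible >= 2.1 is the supported floor. The skeleton of a plugin built that way (the name and display logic here are illustrative, not the installer's):

```python
from __future__ import (absolute_import, print_function)

from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    """Minimal stdout callback built on CallbackBase directly."""

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'quick_example'

    def v2_runner_on_ok(self, result):
        # self._display is provided by CallbackBase
        self._display.display('ok: [%s]' % result._host.get_name())
```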
register: problems - - - debug: var=problems - - - set_fact: - problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}" - - - debug: var=problem_hosts - - - add_host: - name: "{{ item }}" - groups: problem_hosts_group - with_items: "{{ problem_hosts }}" - -- name: "Run on problem hosts" - hosts: problem_hosts_group - gather_facts: no - tasks: - - command: "{{ oo_cmd }}" - when: oo_cmd is defined diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index bdd92a47d..b9966e715 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -92,6 +92,8 @@ - atomic-enterprise-sdn-ovs - atomic-openshift - atomic-openshift-clients + - atomic-openshift-excluder + - atomic-openshift-docker-excluder - atomic-openshift-node - atomic-openshift-sdn-ovs - cockpit-bridge @@ -105,6 +107,8 @@ - openshift-sdn-ovs - openvswitch - origin + - origin-excluder + - origin-docker-excluder - origin-clients - origin-node - origin-sdn-ovs @@ -254,6 +258,8 @@ - atomic-enterprise-master - atomic-openshift - atomic-openshift-clients + - atomic-openshift-excluder + - atomic-openshift-docker-excluder - atomic-openshift-master - cockpit-bridge - cockpit-docker @@ -265,6 +271,8 @@ - openshift-master - origin - origin-clients + - origin-excluder + - origin-docker-excluder - origin-master - pacemaker - pcs diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml deleted file mode 100644 index 955f990b7..000000000 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - connection: local - become: no - vars: - g_server: http://localhost:8080/zabbix/api_jsonrpc.php - g_user: '' - g_password: '' - - roles: - - lib_zabbix - - post_tasks: - - name: CLEAN List template for heartbeat - zbx_template: - zbx_server: "{{ g_server }}" - zbx_user: "{{ g_user }}" - zbx_password: "{{ g_password }}" - state: list - name: 'Template Heartbeat' - register: templ_heartbeat - - - name: CLEAN List template app zabbix server - zbx_template: - zbx_server: "{{ g_server }}" - zbx_user: "{{ g_user }}" - zbx_password: "{{ g_password }}" - state: list - name: 'Template App Zabbix Server' - register: templ_zabbix_server - - - name: CLEAN List template app zabbix server - zbx_template: - zbx_server: "{{ g_server }}" - zbx_user: "{{ g_user }}" - zbx_password: "{{ g_password }}" - state: list - name: 'Template App Zabbix Agent' - register: templ_zabbix_agent - - - name: CLEAN List all templates - zbx_template: - zbx_server: "{{ g_server }}" - zbx_user: "{{ g_user }}" - zbx_password: "{{ g_password }}" - state: list - register: templates - - - debug: var=templ_heartbeat.results - - - name: Remove templates if heartbeat template is missing - zbx_template: - zbx_server: "{{ g_server }}" - zbx_user: "{{ g_user }}" - zbx_password: "{{ g_password }}" - name: "{{ item }}" - state: absent - with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" - when: templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins deleted file mode 120000 index b0b7a3414..000000000 --- a/playbooks/adhoc/zabbix_setup/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml deleted file mode 100755 index 0fe65b338..000000000 --- a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env ansible-playbook ---- -- include: clean_zabbix.yml - vars: - g_server: http://localhost/zabbix/api_jsonrpc.php - g_user: Admin - g_password: zabbix diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml deleted file mode 100755 index 0d5e01878..000000000 --- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/ansible-playbook ---- -- hosts: localhost - gather_facts: no - connection: local - become: no - vars: - g_server: http://localhost/zabbix/api_jsonrpc.php - g_user: Admin - g_password: zabbix - g_zbx_scriptrunner_user: scriptrunner - g_zbx_scriptrunner_bastion_host: specialhost.example.com - roles: - - role: os_zabbix - ozb_server: "{{ g_server }}" - ozb_user: "{{ g_user }}" - ozb_password: "{{ g_password }}" - ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}" - ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}" diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/adhoc/zabbix_setup/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index 0a972adf6..be42f005f 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -42,15 +42,28 @@ {{ avail_disk.stdout }} Kb available. when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) - # TODO - Refactor containerized backup to use etcd_container to backup the data so we don't rely on - # the host's etcdctl binary which may be of a different version. - - # for non containerized and non embedded we should have the correct version of etcd installed already - # For embedded we need to use the latest because OCP 3.3 uses a version of etcd that can only be backed - # up with etcd-3.x + # For non containerized and non embedded we should have the correct version of + # etcd installed already. So don't do anything. + # + # For embedded or containerized we need to use the latest because OCP 3.3 uses + # a version of etcd that can only be backed up with etcd-3.x and if it's + # containerized then etcd version may be newer than that on the host so + # upgrade it. + # + # On atomic we have neither yum nor dnf so ansible throws a hard to debug error + # if you use package there, like this: "Could not find a module for unknown." + # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668 + # + # TODO - We should refactor all containerized backups to use the containerized + # version of etcd to perform the backup rather than relying on the host's + # binaries. Until we do that we'll continue to have problems backing up etcd + # when atomic host has an older version than the version that's running in the + # container whether that's embedded or not - name: Install latest etcd for containerized or embedded - package: name=etcd state=latest - when: ( openshift.common.is_containerized and not openshift.common.is_atomic ) or embedded_etcd | bool + package: + name: etcd + state: latest + when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic - name: Generate etcd backup command: > diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py index 1238acb05..673f11889 100755 --- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py +++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py @@ -146,7 +146,7 @@ def main(): # ignore broad-except error to avoid stack trace to ansible user # pylint: disable=broad-except - except Exception, e: + except Exception as e: return module.fail_json(msg=str(e)) diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py index 2cd454274..1857433c7 100644 --- a/roles/kube_nfs_volumes/library/partitionpool.py +++ b/roles/kube_nfs_volumes/library/partitionpool.py @@ -3,6 +3,8 @@ Ansible module for partitioning. 
""" +from __future__ import print_function + # There is no pyparted on our Jenkins worker # pylint: disable=import-error import parted @@ -131,7 +133,7 @@ def partition(diskname, specs, force=False, check_mode=False): disk = None if disk and len(disk.partitions) > 0 and not force: - print "skipping", diskname + print("skipping", diskname) return 0 # create new partition table, wiping all existing data @@ -220,7 +222,7 @@ def main(): try: specs = parse_spec(sizes) - except ValueError, ex: + except ValueError as ex: err = "Error parsing sizes=" + sizes + ": " + str(ex) module.fail_json(msg=err) @@ -229,7 +231,7 @@ def main(): for disk in disks.split(","): try: changed_count += partition(disk, specs, force, module.check_mode) - except Exception, ex: + except Exception as ex: err = "Error creating partitions on " + disk + ": " + str(ex) raise # module.fail_json(msg=err) diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py index 1fac284f2..7161b5277 100644 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py @@ -371,7 +371,7 @@ an OpenShift Container Platform cluster ###################################################################### # Load the certificate and the CA, parse their expiration dates into # datetime objects so we can manipulate them later - for _, v in cert_meta.iteritems(): + for _, v in cert_meta.items(): with open(v, 'r') as fp: cert = fp.read() cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now) @@ -654,9 +654,13 @@ an OpenShift Container Platform cluster # will be at the front of the list and certificates which will # expire later are at the end. Router and registry certs should be # limited to just 1 result, so don't bother sorting those. 
- check_results['ocp_certs'] = sorted(check_results['ocp_certs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining'])) - check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining'])) - check_results['etcd'] = sorted(check_results['etcd'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining'])) + def cert_key(item): + ''' return the days_remaining key ''' + return item['days_remaining'] + + check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key) + check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key) + check_results['etcd'] = sorted(check_results['etcd'], key=cert_key) # This module will never change anything, but we might want to # change the return code parameter if there is some catastrophic diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml new file mode 100644 index 000000000..14bdd1dca --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cloudforms +spec: + capacity: + storage: 2Gi + accessModes: + - ReadWriteOnce + nfs: + path: /opt/nfs/volumes-app + server: 10.19.0.216 + persistentVolumeReclaimPolicy: Recycle diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml new file mode 100644 index 000000000..709d8d976 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nfs-pv01 +spec: + capacity: + storage: 2Gi + accessModes: + - ReadWriteOnce + nfs: + path: /opt/nfs/volumes + server: 10.19.0.216 + persistentVolumeReclaimPolicy: Recycle diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml new file mode 100644 index 000000000..c8e3d4083 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml @@ -0,0 +1,479 @@ +apiVersion: v1 +kind: Template +labels: + template: cloudforms +metadata: + name: cloudforms + annotations: + description: "CloudForms appliance with persistent storage" + tags: "instant-app,cloudforms,cfme" + iconClass: "icon-rails" +objects: +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: "Exposes and load balances CloudForms pods" + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: ${NAME} + spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: ${NAME} +- apiVersion: v1 + kind: Route + metadata: + name: ${NAME} + spec: + host: ${APPLICATION_DOMAIN} + port: + targetPort: https + tls: + termination: passthrough + to: + kind: Service + name: ${NAME} +- apiVersion: v1 + kind: ImageStream + metadata: + name: cfme-openshift-app + annotations: + description: "Keeps track of changes in the CloudForms app image" + spec: + dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app +- apiVersion: v1 + kind: 
PersistentVolumeClaim + metadata: + name: ${DATABASE_SERVICE_NAME} + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: ${DATABASE_VOLUME_CAPACITY} +- apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ${NAME} + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: ${APPLICATION_VOLUME_CAPACITY} +- apiVersion: v1 + kind: "DeploymentConfig" + metadata: + name: ${NAME} + annotations: + description: "Defines how to deploy the CloudForms appliance" + spec: + template: + metadata: + labels: + name: ${NAME} + name: ${NAME} + spec: + volumes: + - + name: "cfme-app-volume" + persistentVolumeClaim: + claimName: ${NAME} + containers: + - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG} + imagePullPolicy: IfNotPresent + name: cloudforms + livenessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + volumeMounts: + - + name: "cfme-app-volume" + mountPath: "/persistent" + env: + - + name: "APPLICATION_INIT_DELAY" + value: "${APPLICATION_INIT_DELAY}" + - + name: "DATABASE_SERVICE_NAME" + value: "${DATABASE_SERVICE_NAME}" + - + name: "DATABASE_REGION" + value: "${DATABASE_REGION}" + - + name: "MEMCACHED_SERVICE_NAME" + value: "${MEMCACHED_SERVICE_NAME}" + - + name: "POSTGRESQL_USER" + value: "${DATABASE_USER}" + - + name: "POSTGRESQL_PASSWORD" + value: "${DATABASE_PASSWORD}" + - + name: "POSTGRESQL_DATABASE" + value: "${DATABASE_NAME}" + - + name: "POSTGRESQL_MAX_CONNECTIONS" + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - + name: "POSTGRESQL_SHARED_BUFFERS" + value: "${POSTGRESQL_SHARED_BUFFERS}" + resources: + requests: + memory: "${MEMORY_APPLICATION_MIN}" + lifecycle: + preStop: + exec: + command: + - /opt/rh/cfme-container-scripts/sync-pv-data + replicas: 1 + selector: + name: ${NAME} + triggers: + - type: "ConfigChange" + - type: "ImageChange" + imageChangeParams: + automatic: false + containerNames: + - "cloudforms" + from: + kind: "ImageStreamTag" + name: "cfme-openshift-app:${APPLICATION_IMG_TAG}" + strategy: + type: "Recreate" + recreateParams: + timeoutSeconds: 1200 +- apiVersion: v1 + kind: "Service" + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: "Exposes the memcached server" + spec: + ports: + - + name: "memcached" + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" +- apiVersion: v1 + kind: ImageStream + metadata: + name: cfme-openshift-memcached + annotations: + description: "Keeps track of changes in the CloudForms memcached image" + spec: + dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached +- apiVersion: v1 + kind: "DeploymentConfig" + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: "Defines how to deploy memcached" + spec: + strategy: + type: "Recreate" + triggers: + - + type: "ImageChange" + imageChangeParams: + automatic: false + containerNames: + - "memcached" + from: + kind: "ImageStreamTag" + name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}" + - + type: "ConfigChange" + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - + name: "memcached" + image: 
"cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}" + ports: + - + containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - + name: "MEMCACHED_MAX_MEMORY" + value: "${MEMCACHED_MAX_MEMORY}" + - + name: "MEMCACHED_MAX_CONNECTIONS" + value: "${MEMCACHED_MAX_CONNECTIONS}" + - + name: "MEMCACHED_SLAB_PAGE_SIZE" + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + limits: + memory: "${MEMORY_MEMCACHED_LIMIT}" +- apiVersion: v1 + kind: "Service" + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: "Exposes the database server" + spec: + ports: + - + name: "postgresql" + port: 5432 + targetPort: 5432 + selector: + name: "${DATABASE_SERVICE_NAME}" +- apiVersion: v1 + kind: ImageStream + metadata: + name: cfme-openshift-postgresql + annotations: + description: "Keeps track of changes in the CloudForms postgresql image" + spec: + dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql +- apiVersion: v1 + kind: "DeploymentConfig" + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: "Defines how to deploy the database" + spec: + strategy: + type: "Recreate" + triggers: + - + type: "ImageChange" + imageChangeParams: + automatic: false + containerNames: + - "postgresql" + from: + kind: "ImageStreamTag" + name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}" + - + type: "ConfigChange" + replicas: 1 + selector: + name: "${DATABASE_SERVICE_NAME}" + template: + metadata: + name: "${DATABASE_SERVICE_NAME}" + labels: + name: "${DATABASE_SERVICE_NAME}" + spec: + volumes: + - + name: "cfme-pgdb-volume" + persistentVolumeClaim: + claimName: ${DATABASE_SERVICE_NAME} + containers: + - + name: "postgresql" + image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}" + ports: + - + containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'" + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - + name: "cfme-pgdb-volume" + mountPath: "/var/lib/pgsql/data" + env: + - + name: "POSTGRESQL_USER" + value: "${DATABASE_USER}" + - + name: "POSTGRESQL_PASSWORD" + value: "${DATABASE_PASSWORD}" + - + name: "POSTGRESQL_DATABASE" + value: "${DATABASE_NAME}" + - + name: "POSTGRESQL_MAX_CONNECTIONS" + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - + name: "POSTGRESQL_SHARED_BUFFERS" + value: "${POSTGRESQL_SHARED_BUFFERS}" + resources: + limits: + memory: "${MEMORY_POSTGRESQL_LIMIT}" + +parameters: + - + name: "NAME" + displayName: Name + required: true + description: "The name assigned to all of the frontend objects defined in this template." + value: cloudforms + - + name: "DATABASE_SERVICE_NAME" + displayName: "PostgreSQL Service Name" + required: true + description: "The name of the OpenShift Service exposed for the PostgreSQL container." + value: "postgresql" + - + name: "DATABASE_USER" + displayName: "PostgreSQL User" + required: true + description: "PostgreSQL user that will access the database." + value: "root" + - + name: "DATABASE_PASSWORD" + displayName: "PostgreSQL Password" + required: true + description: "Password for the PostgreSQL user." 
+ value: "smartvm" + - + name: "DATABASE_NAME" + required: true + displayName: "PostgreSQL Database Name" + description: "Name of the PostgreSQL database accessed." + value: "vmdb_production" + - + name: "DATABASE_REGION" + required: true + displayName: "Application Database Region" + description: "Database region that will be used for application." + value: "0" + - + name: "MEMCACHED_SERVICE_NAME" + required: true + displayName: "Memcached Service Name" + description: "The name of the OpenShift Service exposed for the Memcached container." + value: "memcached" + - + name: "MEMCACHED_MAX_MEMORY" + displayName: "Memcached Max Memory" + description: "Memcached maximum memory for memcached object storage in MB." + value: "64" + - + name: "MEMCACHED_MAX_CONNECTIONS" + displayName: "Memcached Max Connections" + description: "Memcached maximum number of connections allowed." + value: "1024" + - + name: "MEMCACHED_SLAB_PAGE_SIZE" + displayName: "Memcached Slab Page Size" + description: "Memcached size of each slab page." + value: "1m" + - + name: "POSTGRESQL_MAX_CONNECTIONS" + displayName: "PostgreSQL Max Connections" + description: "PostgreSQL maximum number of database connections allowed." + value: "100" + - + name: "POSTGRESQL_SHARED_BUFFERS" + displayName: "PostgreSQL Shared Buffer Amount" + description: "Amount of memory dedicated for PostgreSQL shared memory buffers." + value: "64MB" + - + name: "MEMORY_APPLICATION_MIN" + displayName: "Application Memory Minimum" + required: true + description: "Minimum amount of memory the Application container will need." + value: "4096Mi" + - + name: "MEMORY_POSTGRESQL_LIMIT" + displayName: "PostgreSQL Memory Limit" + required: true + description: "Maximum amount of memory the PostgreSQL container can use." + value: "2048Mi" + - + name: "MEMORY_MEMCACHED_LIMIT" + displayName: "Memcached Memory Limit" + required: true + description: "Maximum amount of memory the Memcached container can use." + value: "256Mi" + - + name: "POSTGRESQL_IMG_TAG" + displayName: "PostgreSQL Image Tag" + description: "This is the PostgreSQL image tag/version requested to deploy." + value: "latest" + - + name: "MEMCACHED_IMG_TAG" + displayName: "Memcached Image Tag" + description: "This is the Memcached image tag/version requested to deploy." + value: "latest" + - + name: "APPLICATION_IMG_TAG" + displayName: "Application Image Tag" + description: "This is the Application image tag/version requested to deploy." + value: "latest" + - + name: "APPLICATION_DOMAIN" + displayName: "Application Hostname" + description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted." + value: "" + - + name: "APPLICATION_INIT_DELAY" + displayName: "Application Init Delay" + required: true + description: "Delay in seconds before we attempt to initialize the application." + value: "30" + - + name: "APPLICATION_VOLUME_CAPACITY" + displayName: "Application Volume Capacity" + required: true + description: "Volume space available for application data." + value: "1Gi" + - + name: "DATABASE_VOLUME_CAPACITY" + displayName: "Database Volume Capacity" + required: true + description: "Volume space available for database." 
+ value: "1Gi" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 41ae07a48..05b0377bc 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -26,6 +26,8 @@ import struct import socket from distutils.util import strtobool from distutils.version import LooseVersion +from six import string_types +from six import text_type # ignore pylint errors related to the module_utils import # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import @@ -87,7 +89,7 @@ def migrate_docker_facts(facts): # log_options was originally meant to be a comma separated string, but # we now prefer an actual list, with backward compatibility: if 'log_options' in facts['docker'] and \ - isinstance(facts['docker']['log_options'], basestring): + isinstance(facts['docker']['log_options'], string_types): facts['docker']['log_options'] = facts['docker']['log_options'].split(",") return facts @@ -226,7 +228,7 @@ def choose_hostname(hostnames=None, fallback=''): return hostname ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z' - ips = [i for i in hostnames if i is not None and isinstance(i, basestring) and re.match(ip_regex, i)] + ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)] hosts = [i for i in hostnames if i is not None and i != '' and i not in ips] for host_list in (hosts, ips): @@ -363,7 +365,7 @@ def normalize_aws_facts(metadata, facts): var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'} for ips_var, int_var in iteritems(var_map): ips = interface.get(int_var) - if isinstance(ips, basestring): + if isinstance(ips, string_types): int_info[ips_var] = [ips] else: int_info[ips_var] = ips @@ -772,7 +774,7 @@ def set_etcd_facts_if_unset(facts): # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf: try: # Add a fake section for parsing: - ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8') + ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8') ini_fp = io.StringIO(ini_str) config = ConfigParser.RawConfigParser() config.readfp(ini_fp) @@ -1280,15 +1282,14 @@ def get_hosted_registry_insecure(): hosted_registry_insecure = None if os.path.exists('/etc/sysconfig/docker'): try: - ini_str = unicode('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8') + ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8') ini_fp = io.StringIO(ini_str) config = ConfigParser.RawConfigParser() config.readfp(ini_fp) options = config.get('root', 'OPTIONS') if 'insecure-registry' in options: hosted_registry_insecure = True - # pylint: disable=bare-except - except: + except Exception: # pylint: disable=broad-except pass return hosted_registry_insecure @@ -1449,7 +1450,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw if key in inventory_json_facts: # Watchout for JSON facts that sometimes load as strings. # (can happen if the JSON contains a boolean) - if isinstance(new[key], basestring): + if isinstance(new[key], string_types): facts[key] = yaml.safe_load(new[key]) else: facts[key] = copy.deepcopy(new[key]) @@ -1511,7 +1512,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw for key in new_keys: # Watchout for JSON facts that sometimes load as strings. 
# (can happen if the JSON contains a boolean) - if key in inventory_json_facts and isinstance(new[key], basestring): + if key in inventory_json_facts and isinstance(new[key], string_types): facts[key] = yaml.safe_load(new[key]) else: facts[key] = copy.deepcopy(new[key]) @@ -1614,7 +1615,7 @@ def set_proxy_facts(facts): if 'common' in facts: common = facts['common'] if 'http_proxy' in common or 'https_proxy' in common: - if 'no_proxy' in common and isinstance(common['no_proxy'], basestring): + if 'no_proxy' in common and isinstance(common['no_proxy'], string_types): common['no_proxy'] = common['no_proxy'].split(",") elif 'no_proxy' not in common: common['no_proxy'] = [] @@ -1636,7 +1637,7 @@ def set_proxy_facts(facts): if 'https_proxy' not in builddefaults and 'https_proxy' in common: builddefaults['https_proxy'] = common['https_proxy'] # make no_proxy into a list if it's not - if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], basestring): + if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], string_types): builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",") if 'no_proxy' not in builddefaults and 'no_proxy' in common: builddefaults['no_proxy'] = common['no_proxy'] @@ -2220,12 +2221,12 @@ class OpenShiftFacts(object): key = '{0}_registries'.format(cat) if key in new_local_facts['docker']: val = new_local_facts['docker'][key] - if isinstance(val, basestring): + if isinstance(val, string_types): val = [x.strip() for x in val.split(',')] new_local_facts['docker'][key] = list(set(val) - set([''])) # Convert legacy log_options comma sep string to a list if present: if 'log_options' in new_local_facts['docker'] and \ - isinstance(new_local_facts['docker']['log_options'], basestring): + isinstance(new_local_facts['docker']['log_options'], string_types): new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',') new_local_facts = self.remove_empty_facts(new_local_facts) diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml index 13cef2d66..c47d5361d 100644 --- a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml @@ -72,7 +72,6 @@ items: metadata: name: logging-deployer-edit-role roleRef: - kind: ClusterRole name: edit subjects: - kind: ServiceAccount @@ -83,7 +82,6 @@ items: metadata: name: logging-deployer-dsadmin-role roleRef: - kind: ClusterRole name: daemonset-admin subjects: - kind: ServiceAccount diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml index ddfda1272..c67058696 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml @@ -81,7 +81,6 @@ items: metadata: name: logging-deployer-edit-role roleRef: - kind: ClusterRole name: edit subjects: - kind: ServiceAccount @@ -92,7 +91,6 @@ items: metadata: name: logging-deployer-dsadmin-role roleRef: - kind: ClusterRole name: daemonset-admin subjects: - kind: ServiceAccount @@ -103,7 +101,6 @@ items: metadata: name: logging-elasticsearch-view-role roleRef: - kind: ClusterRole name: view subjects: - kind: ServiceAccount diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml 
index d5ed9c09d..23dcd0440 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -37,7 +37,7 @@ when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora" and openshift_deployment_type == 'origin' and not openshift.common.is_containerized | bool - and openshift_enable_origin_repo | default(true) + and openshift_enable_origin_repo | default(true) | bool - name: Configure origin yum repositories RHEL/CentOS copy: @@ -47,4 +47,4 @@ when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora" and openshift_deployment_type == 'origin' and not openshift.common.is_containerized | bool - and openshift_enable_origin_repo | default(true) + and openshift_enable_origin_repo | default(true) | bool diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2 index 2d9243545..0ec0045eb 100644 --- a/roles/openshift_repos/templates/yum_repo.j2 +++ b/roles/openshift_repos/templates/yum_repo.j2 @@ -2,9 +2,9 @@ [{{ repo.id }}] name={{ repo.name | default(repo.id) }} baseurl={{ repo.baseurl }} -{% set enable_repo = repo.enabled | default('1') %} +{% set enable_repo = repo.enabled | default(1,True) %} enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }} -{% set enable_gpg_check = repo.gpgcheck | default('1') %} +{% set enable_gpg_check = repo.gpgcheck | default(1,True) %} gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }} {% for key, value in repo.iteritems() %} {% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %} diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md index 8b8471745..cc674d3fd 100644 --- a/roles/openshift_storage_nfs_lvm/README.md +++ b/roles/openshift_storage_nfs_lvm/README.md @@ -48,6 +48,13 @@ osnl_volume_num_start: 3 # How many volumes/partitions to build, with the size we stated. osnl_number_of_volumes: 2 +# osnl_volume_reclaim_policy +# Volume reclaim policy of a PersistentVolume tells the cluster +# what to do with the volume after it is released. +# +# Valid values are "Retain" or "Recycle" (default). +osnl_volume_reclaim_policy: "Recycle" + ``` ## Dependencies @@ -71,6 +78,7 @@ exported via NFS. json files are created in /root. osnl_volume_size: 5 osnl_volume_num_start: 3 osnl_number_of_volumes: 2 + osnl_volume_reclaim_policy: "Recycle" ## Full example @@ -96,6 +104,7 @@ exported via NFS. json files are created in /root. osnl_volume_size: 5 osnl_volume_num_start: 3 osnl_number_of_volumes: 2 + osnl_volume_reclaim_policy: "Recycle" * Run the playbook: ``` diff --git a/roles/openshift_storage_nfs_lvm/defaults/main.yml b/roles/openshift_storage_nfs_lvm/defaults/main.yml index f81cdc724..48352187c 100644 --- a/roles/openshift_storage_nfs_lvm/defaults/main.yml +++ b/roles/openshift_storage_nfs_lvm/defaults/main.yml @@ -8,3 +8,10 @@ osnl_mount_dir: /exports/openshift # Volume Group to use. osnl_volume_group: openshiftvg + +# Volume reclaim policy of a PersistentVolume tells the cluster +# what to do with the volume after it is released. +# +# Valid values are "Retain" or "Recycle". 
+# See https://docs.openshift.com/enterprise/3.0/architecture/additional_concepts/storage.html#pv-recycling-policy +osnl_volume_reclaim_policy: "Recycle" diff --git a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 index 3c4d2f56c..19e150f7d 100644 --- a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 +++ b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 @@ -12,7 +12,7 @@ "storage": "{{ osnl_volume_size }}Gi" }, "accessModes": [ "ReadWriteOnce", "ReadWriteMany" ], - "persistentVolumeReclaimPolicy": "Recycle", + "persistentVolumeReclaimPolicy": "{{ osnl_volume_reclaim_policy }}", "nfs": { "Server": "{{ inventory_hostname }}", "Path": "{{ osnl_mount_dir }}/{{ item }}" diff --git a/utils/.coveragerc b/utils/.coveragerc index 0c870af42..e1d918755 100644 --- a/utils/.coveragerc +++ b/utils/.coveragerc @@ -1,4 +1,5 @@ [run] omit= */lib/python*/site-packages/* + */lib/python*/* /usr/* diff --git a/utils/Makefile b/utils/Makefile index c061edd8c..0e1cd79dd 100644 --- a/utils/Makefile +++ b/utils/Makefile @@ -33,8 +33,8 @@ MANPAGES := docs/man/man1/atomic-openshift-installer.1 VERSION := 1.3 # YAMLFILES: Skipping all '/files/' folders due to conflicting yaml file definitions -YAMLFILES = $(shell find ../ -name $(VENV) -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" 2>&1) -PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name "*.py" -print) +YAMLFILES = $(shell find ../ -name $(VENV) -prune -o -name .tox -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" -print 2>&1) +PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name .tox -prune -o -name "*.py" -print) sdist: clean python setup.py sdist @@ -83,7 +83,8 @@ ci-unittests: $(VENV) @echo "#############################################" @echo "# Running Unit Tests in virtualenv" @echo "#############################################" - . $(VENV)/bin/activate && python setup.py nosetests + . $(VENV)/bin/activate && tox -e py27-unit + . $(VENV)/bin/activate && tox -e py35-unit @echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'" ci-pylint: $(VENV) @@ -108,12 +109,15 @@ ci-flake8: $(VENV) @echo "#############################################" @echo "# Running Flake8 Compliance Tests in virtualenv" @echo "#############################################" - . $(VENV)/bin/activate && flake8 --config=setup.cfg ../ --exclude="utils,../inventory" - . $(VENV)/bin/activate && python setup.py flake8 + . $(VENV)/bin/activate && tox -e py27-flake8 + . $(VENV)/bin/activate && tox -e py35-flake8 -ci: ci-list-deps ci-unittests ci-flake8 ci-pylint ci-yamllint +ci-tox: + . 
$(VENV)/bin/activate && tox + +ci: ci-list-deps ci-tox ci-pylint ci-yamllint @echo @echo "##################################################################################" @echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'" @echo "To clean your test environment run 'make clean'" - @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-unittests', 'ci-flake8', 'ci-yamllint'" + @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8', 'ci-yamllint'" diff --git a/utils/setup.cfg b/utils/setup.cfg index ee3288fc5..ea07eea9f 100644 --- a/utils/setup.cfg +++ b/utils/setup.cfg @@ -17,5 +17,5 @@ cover-branches=1 [flake8] max-line-length=120 -exclude=tests/*,setup.py -ignore=E501,E121,E124 +exclude=test/*,setup.py,oo-installenv +ignore=E501 diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 7e5ad4144..b70bd1817 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -501,7 +501,7 @@ def get_variant_and_version(multi_master=False): i = 1 combos = get_variant_version_combos() - for (variant, version) in combos: + for (variant, _) in combos: message = "%s\n(%s) %s" % (message, i, variant.description) i = i + 1 message = "%s\n" % message diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index 64eb340f3..cf14105af 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -1,5 +1,7 @@ # pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods +from __future__ import (absolute_import, print_function) + import os import sys import logging @@ -50,7 +52,7 @@ Error loading config. {}. See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file for information on creating a configuration file or delete {} and re-run the installer. 
""" - print message.format(error, path) + print(message.format(error, path)) class OOConfigFileError(Exception): @@ -103,7 +105,7 @@ class Host(object): # If the property is defined (not None or False), export it: if getattr(self, prop): d[prop] = getattr(self, prop) - for variable, value in self.other_variables.iteritems(): + for variable, value in self.other_variables.items(): d[variable] = value return d @@ -256,16 +258,16 @@ class OOConfig(object): # Parse the hosts into DTO objects: for host in host_list: host['other_variables'] = {} - for variable, value in host.iteritems(): + for variable, value in host.items(): if variable not in HOST_VARIABLES_BLACKLIST: host['other_variables'][variable] = value self.deployment.hosts.append(Host(**host)) # Parse the roles into Objects - for name, variables in role_list.iteritems(): + for name, variables in role_list.items(): self.deployment.roles.update({name: Role(name, variables)}) - except IOError, ferr: + except IOError as ferr: raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename, ferr.strerror)) except yaml.scanner.ScannerError: @@ -354,14 +356,13 @@ class OOConfig(object): self.settings['ansible_inventory_path'] = \ '{}/hosts'.format(os.path.dirname(self.config_path)) - # pylint: disable=consider-iterating-dictionary - # Disabled because we shouldn't alter the container we're - # iterating over - # # clean up any empty sets - for setting in self.settings.keys(): + empty_keys = [] + for setting in self.settings: if not self.settings[setting]: - self.settings.pop(setting) + empty_keys.append(setting) + for key in empty_keys: + self.settings.pop(key) installer_log.debug("Updated OOConfig settings: %s", self.settings) @@ -410,7 +411,7 @@ class OOConfig(object): for host in self.deployment.hosts: p_settings['deployment']['hosts'].append(host.to_dict()) - for name, role in self.deployment.roles.iteritems(): + for name, role in self.deployment.roles.items(): p_settings['deployment']['roles'][name] = role.variables for setting in self.deployment.variables: @@ -424,7 +425,7 @@ class OOConfig(object): if self.settings['ansible_inventory_directory'] != self._default_ansible_inv_dir(): p_settings['ansible_inventory_directory'] = self.settings['ansible_inventory_directory'] except KeyError as e: - print "Error persisting settings: {}".format(e) + print("Error persisting settings: {}".format(e)) sys.exit(0) return p_settings diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index f542fb376..ce6e54664 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -1,5 +1,7 @@ # pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned +from __future__ import (absolute_import, print_function) + import socket import subprocess import sys @@ -107,12 +109,12 @@ def write_inventory_vars(base_inventory, lb): global CFG base_inventory.write('\n[OSEv3:vars]\n') - for variable, value in CFG.settings.iteritems(): + for variable, value in CFG.settings.items(): inventory_var = VARIABLES_MAP.get(variable, None) if inventory_var and value: base_inventory.write('{}={}\n'.format(inventory_var, value)) - for variable, value in CFG.deployment.variables.iteritems(): + for variable, value in CFG.deployment.variables.items(): inventory_var = VARIABLES_MAP.get(variable, variable) if value: base_inventory.write('{}={}\n'.format(inventory_var, value)) @@ -152,11 +154,11 @@ def 
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index f542fb376..ce6e54664 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -1,5 +1,7 @@
 # pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
 
+from __future__ import (absolute_import, print_function)
+
 import socket
 import subprocess
 import sys
@@ -107,12 +109,12 @@ def write_inventory_vars(base_inventory, lb):
     global CFG
     base_inventory.write('\n[OSEv3:vars]\n')
 
-    for variable, value in CFG.settings.iteritems():
+    for variable, value in CFG.settings.items():
         inventory_var = VARIABLES_MAP.get(variable, None)
         if inventory_var and value:
             base_inventory.write('{}={}\n'.format(inventory_var, value))
 
-    for variable, value in CFG.deployment.variables.iteritems():
+    for variable, value in CFG.deployment.variables.items():
         inventory_var = VARIABLES_MAP.get(variable, variable)
         if value:
             base_inventory.write('{}={}\n'.format(inventory_var, value))
@@ -152,11 +154,11 @@ def write_inventory_vars(base_inventory, lb):
                              "'baseurl': '{}', "
                              "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
 
-    for name, role_obj in CFG.deployment.roles.iteritems():
+    for name, role_obj in CFG.deployment.roles.items():
         if role_obj.variables:
             group_name = ROLES_TO_GROUPS_MAP.get(name, name)
             base_inventory.write("\n[{}:vars]\n".format(group_name))
-            for variable, value in role_obj.variables.iteritems():
+            for variable, value in role_obj.variables.items():
                 inventory_var = VARIABLES_MAP.get(variable, variable)
                 if value:
                     base_inventory.write('{}={}\n'.format(inventory_var, value))
@@ -193,7 +195,7 @@ def write_host(host, role, inventory, schedulable=None):
             facts += ' {}={}'.format(HOST_VARIABLES_MAP.get(prop), getattr(host, prop))
 
     if host.other_variables:
-        for variable, value in host.other_variables.iteritems():
+        for variable, value in host.other_variables.items():
             facts += " {}={}".format(variable, value)
 
     if host.node_labels and role == 'node':
@@ -210,9 +212,9 @@ def write_host(host, role, inventory, schedulable=None):
     if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
         facts += ' ansible_connection=local'
         if os.geteuid() != 0:
-            no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
+            no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', '-n'])
             if no_pwd_sudo == 1:
-                print 'The atomic-openshift-installer requires sudo access without a password.'
+                print('The atomic-openshift-installer requires sudo access without a password.')
                 sys.exit(1)
             facts += ' ansible_become=yes'
 
@@ -245,9 +247,9 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
         installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml'])
         try:
             callback_facts = yaml.safe_load(callback_facts_file)
-        except yaml.YAMLError, exc:
-            print "Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc
-            print "Try deleting and rerunning the atomic-openshift-installer"
+        except yaml.YAMLError as exc:
+            print("Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc)
+            print("Try deleting and rerunning the atomic-openshift-installer")
             sys.exit(1)
 
     return callback_facts, 0
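Note: two behaviors are packed into the one-line sudo change. `sudo -n` makes sudo fail instead of prompting when a password would be required, and swapping the echoed word `openshift` for `-n` keeps the successful probe from printing a stray token to the terminal. A standalone sketch of the probe logic:

```python
# Returns 0 when passwordless sudo works; sudo exits 1 when it
# would have had to prompt for a password.
import subprocess
import sys

no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', '-n'])
if no_pwd_sudo == 1:
    print('The atomic-openshift-installer requires sudo access without a password.')
    sys.exit(1)
```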
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index 39772bb2e..a45be98bf 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -38,32 +38,24 @@ class Variant(object):
 
 # WARNING: Keep the versions ordered, most recent first:
-OSE = Variant('openshift-enterprise', 'OpenShift Container Platform',
-    [
-        Version('3.4', 'openshift-enterprise'),
-    ]
-)
-
-REG = Variant('openshift-enterprise', 'Registry',
-    [
-        Version('3.4', 'openshift-enterprise', 'registry'),
-    ]
-)
-
-origin = Variant('origin', 'OpenShift Origin',
-    [
-        Version('1.4', 'origin'),
-    ]
-)
-
-LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform',
-    [
-        Version('3.3', 'openshift-enterprise'),
-        Version('3.2', 'openshift-enterprise'),
-        Version('3.1', 'openshift-enterprise'),
-        Version('3.0', 'openshift-enterprise'),
-    ]
-)
+OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [
+    Version('3.4', 'openshift-enterprise'),
+])
+
+REG = Variant('openshift-enterprise', 'Registry', [
+    Version('3.4', 'openshift-enterprise', 'registry'),
+])
+
+origin = Variant('origin', 'OpenShift Origin', [
+    Version('1.4', 'origin'),
+])
+
+LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [
+    Version('3.3', 'openshift-enterprise'),
+    Version('3.2', 'openshift-enterprise'),
+    Version('3.1', 'openshift-enterprise'),
+    Version('3.0', 'openshift-enterprise'),
+])
 
 # Ordered list of variants we can install, first is the default.
 SUPPORTED_VARIANTS = (OSE, REG, origin, LEGACY)
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
index b70a03563..e5c5360c3 100644
--- a/utils/test-requirements.txt
+++ b/utils/test-requirements.txt
@@ -1,5 +1,4 @@
 ansible
-enum
 configparser
 pylint
 nose
@@ -11,3 +10,4 @@ click
 backports.functools_lru_cache
 pyOpenSSL
 yamllint
+tox
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 36dc18034..0cb37eaff 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -4,7 +4,8 @@
 
 import copy
 import os
-import ConfigParser
+
+from six.moves import configparser
 
 import ooinstall.cli_installer as cli
 
@@ -408,7 +409,7 @@ class UnattendedCliTests(OOCliFixture):
         result = self.runner.invoke(cli.cli, self.cli_args)
 
         if result.exception is None or result.exit_code != 1:
-            print "Exit code: %s" % result.exit_code
+            print("Exit code: %s" % result.exit_code)
             self.fail("Unexpected CLI return")
 
     # unattended with config file and all installed hosts (with --force)
@@ -523,7 +524,7 @@ class UnattendedCliTests(OOCliFixture):
         self.assert_result(result, 0)
 
         # Check the inventory file looks as we would expect:
-        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory = configparser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
         self.assertEquals('root',
                           inventory.get('OSEv3:vars', 'ansible_ssh_user'))
@@ -566,7 +567,7 @@ class UnattendedCliTests(OOCliFixture):
         self.assertEquals('3.3', written_config['variant_version'])
 
         # Make sure the correct value was passed to ansible:
-        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory = configparser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
         self.assertEquals('openshift-enterprise',
                           inventory.get('OSEv3:vars', 'deployment_type'))
@@ -594,7 +595,7 @@ class UnattendedCliTests(OOCliFixture):
         # and written to disk:
         self.assertEquals('3.3', written_config['variant_version'])
 
-        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory = configparser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
         self.assertEquals('openshift-enterprise',
                           inventory.get('OSEv3:vars', 'deployment_type'))
@@ -830,7 +831,7 @@ class AttendedCliTests(OOCliFixture):
         written_config = read_yaml(self.config_file)
         self._verify_config_hosts(written_config, 4)
 
-        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory = configparser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
         self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
                                        'openshift_schedulable=False')
@@ -949,7 +950,7 @@ class AttendedCliTests(OOCliFixture):
         written_config = read_yaml(self.config_file)
         self._verify_config_hosts(written_config, 6)
 
-        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory = configparser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
         self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
                                        'openshift_schedulable=False')
@@ -990,7 +991,7 @@ class AttendedCliTests(OOCliFixture):
         written_config = read_yaml(self.config_file)
         self._verify_config_hosts(written_config, 5)
 
-        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory = configparser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
         self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
                                        'openshift_schedulable=True')
@@ -1082,7 +1083,7 @@ class AttendedCliTests(OOCliFixture):
         written_config = read_yaml(self.config_file)
         self._verify_config_hosts(written_config, 1)
 
-        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory = configparser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
         self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
                                        'openshift_schedulable=True')
@@ -1116,7 +1117,7 @@ class AttendedCliTests(OOCliFixture):
         written_config = read_yaml(self.config_file)
         self._verify_config_hosts(written_config, 4)
 
-        inventory = ConfigParser.ConfigParser(allow_no_value=True)
+        inventory = configparser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
         self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
                                        'openshift_schedulable=False')
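Note: `six.moves.configparser` resolves to the `ConfigParser` module on Python 2 and to `configparser` on Python 3, so the tests can import one name for both interpreters. A minimal self-contained sketch of the pattern the tests use (the file contents here are made up for illustration):

```python
# Write a tiny inventory-style file, then parse it the way the tests do.
import tempfile

from six.moves import configparser

with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as f:
    f.write('[OSEv3:vars]\nansible_ssh_user=root\n')

inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(f.name)
print(inventory.get('OSEv3:vars', 'ansible_ssh_user'))  # root
```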
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 62135c761..5200d275d 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -65,13 +65,13 @@ class OOCliFixture(OOInstallFixture):
 
     def assert_result(self, result, exit_code):
         if result.exit_code != exit_code:
-            print "Unexpected result from CLI execution"
-            print "Exit code: %s" % result.exit_code
-            print "Exception: %s" % result.exception
-            print result.exc_info
+            print("Unexpected result from CLI execution")
+            print("Exit code: %s" % result.exit_code)
+            print("Exception: %s" % result.exception)
+            print(result.exc_info)
             import traceback
             traceback.print_exception(*result.exc_info)
-            print "Output:\n%s" % result.output
+            print("Output:\n%s" % result.output)
             self.fail("Exception during CLI execution")
 
     def _verify_load_facts(self, load_facts_mock):
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 56fd82408..2b4fce512 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -2,13 +2,14 @@
 # repo. We will work on these over time.
 # pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
 
-import cStringIO
 import os
 import unittest
 import tempfile
 import shutil
 import yaml
 
+from six.moves import cStringIO
+
 from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
 import ooinstall.openshift_ansible
 
@@ -244,7 +245,7 @@ class HostTests(OOInstallFixture):
         }
         new_node = Host(**yaml_props)
 
-        inventory = cStringIO.StringIO()
+        inventory = cStringIO()
         # This is what the 'write_host' function generates. write_host
         # has no return value, it just writes directly to the file
         # 'inventory' which in this test-case is a StringIO object
@@ -285,7 +286,7 @@ class HostTests(OOInstallFixture):
         #     }
         #     new_node = Host(**yaml_props)
 
-        #     inventory = cStringIO.StringIO()
+        #     inventory = cStringIO()
         #     # This is what the original 'write_host' function will
         #     # generate. write_host has no return value, it just writes
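Note: `six.moves.cStringIO` is a factory function (`cStringIO.StringIO` on Python 2, `io.StringIO` on Python 3), which is why the call site changes from `cStringIO.StringIO()` to plain `cStringIO()`. A minimal sketch with illustrative contents:

```python
from six.moves import cStringIO

# An in-memory file object on either interpreter; write_host() in the
# tests targets an object like this instead of a real file.
inventory = cStringIO()
inventory.write(u'[nodes]\n10.0.0.1 openshift_schedulable=False\n')
print(inventory.getvalue())
```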
""" +import six import unittest import logging import sys @@ -28,9 +29,6 @@ class TestUtils(unittest.TestCase): mock.call('OO_FOO: bar'), ] - # python 2.x has assertItemsEqual, python 3.x has assertCountEqual - if sys.version_info.major > 3: - self.assertItemsEqual = self.assertCountEqual ###################################################################### # Validate ooinstall.utils.debug_env functionality @@ -40,7 +38,7 @@ class TestUtils(unittest.TestCase): with mock.patch('ooinstall.utils.installer_log') as _il: debug_env(self.debug_all_params) - print _il.debug.call_args_list + print(_il.debug.call_args_list) # Debug was called for each item we expect self.assertEqual( @@ -48,7 +46,8 @@ class TestUtils(unittest.TestCase): _il.debug.call_count) # Each item we expect was logged - self.assertItemsEqual( + six.assertCountEqual( + self, self.expected, _il.debug.call_args_list) @@ -67,7 +66,8 @@ class TestUtils(unittest.TestCase): _il.debug.call_count, len(debug_some_params)) - self.assertItemsEqual( + six.assertCountEqual( + self, self.expected, _il.debug.call_args_list) diff --git a/utils/tox.ini b/utils/tox.ini new file mode 100644 index 000000000..747d79dfe --- /dev/null +++ b/utils/tox.ini @@ -0,0 +1,17 @@ +[tox] +minversion=2.3.1 +envlist = + py{27,35}-{flake8,unit} +skipsdist=True +skip_missing_interpreters=True + +[testenv] +usedevelop=True +deps = + -rtest-requirements.txt + py35-flake8: flake8-bugbear + +commands = + flake8: flake8 --config=setup.cfg ../ --exclude="../utils,.tox,../inventory" + flake8: python setup.py flake8 + unit: python setup.py nosetests |