57 files changed, 722 insertions(+), 265 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 23c40682f..a22da0715 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.75-1 ./ +3.0.81-1 ./ diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index cd67b69a5..f6cc2edde 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -57,6 +57,19 @@ class FilterModule(object): return [item for sublist in data for item in sublist] @staticmethod + def oo_merge_dicts(first_dict, second_dict): + """ Merge two dictionaries where second_dict values take precedence. + Ex: first_dict={'a': 1, 'b': 2} + second_dict={'b': 3, 'c': 4} + returns {'a': 1, 'b': 3, 'c': 4} + """ + if not isinstance(first_dict, dict) or not isinstance(second_dict, dict): + raise errors.AnsibleFilterError("|failed expects to merge two dicts") + merged = first_dict.copy() + merged.update(second_dict) + return merged + + @staticmethod def oo_collect(data, attribute=None, filters=None): """ This takes a list of dict and collects all attributes specified into a list. If filter is specified then we will include all items that @@ -286,6 +299,69 @@ class FilterModule(object): return [x for x in data if x.has_key(filter_attr) and x[filter_attr]] @staticmethod + def oo_oc_nodes_matching_selector(nodes, selector): + """ Filters a list of nodes by selector. + + Examples: + nodes = [{"kind": "Node", "metadata": {"name": "node1.example.com", + "labels": {"kubernetes.io/hostname": "node1.example.com", + "color": "green"}}}, + {"kind": "Node", "metadata": {"name": "node2.example.com", + "labels": {"kubernetes.io/hostname": "node2.example.com", + "color": "red"}}}] + selector = 'color=green' + returns = ['node1.example.com'] + Args: + nodes (list[dict]): list of node definitions + selector (str): "label=value" node selector to filter `nodes` by + Returns: + list[str]: nodes filtered by selector + """ + if not isinstance(nodes, list): + raise errors.AnsibleFilterError("failed expects nodes to be a list, got {0}".format(type(nodes))) + if not isinstance(selector, basestring): + raise errors.AnsibleFilterError("failed expects selector to be a string") + if not re.match('.*=.*', selector): + raise errors.AnsibleFilterError("failed selector does not match \"label=value\" format") + label = selector.split('=')[0] + value = selector.split('=')[1] + return FilterModule.oo_oc_nodes_with_label(nodes, label, value) + + @staticmethod + def oo_oc_nodes_with_label(nodes, label, value): + """ Filters a list of nodes by label, value. 
+ + Examples: + nodes = [{"kind": "Node", "metadata": {"name": "node1.example.com", + "labels": {"kubernetes.io/hostname": "node1.example.com", + "color": "green"}}}, + {"kind": "Node", "metadata": {"name": "node2.example.com", + "labels": {"kubernetes.io/hostname": "node2.example.com", + "color": "red"}}}] + label = 'color' + value = 'green' + returns = ['node1.example.com'] + Args: + nodes (list[dict]): list of node definitions + label (str): label to filter `nodes` by + value (str): value of `label` to filter `nodes` by + Returns: + list[str]: nodes filtered by selector + """ + if not isinstance(nodes, list): + raise errors.AnsibleFilterError("failed expects nodes to be a list") + if not isinstance(label, basestring): + raise errors.AnsibleFilterError("failed expects label to be a string") + if not isinstance(value, basestring): + raise errors.AnsibleFilterError("failed expects value to be a string") + matching_nodes = [] + for node in nodes: + if label in node['metadata']['labels']: + if node['metadata']['labels'][label] == value: + matching_nodes.append(node['metadata']['name']) + return matching_nodes + + @staticmethod def oo_nodes_with_label(nodes, label, value=None): """ Filters a list of nodes by label and value (if provided) @@ -588,36 +664,38 @@ class FilterModule(object): if persistent_volumes == None: persistent_volumes = [] - for component in hostvars['openshift']['hosted']: - kind = hostvars['openshift']['hosted'][component]['storage']['kind'] - create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] - if kind != None and create_pv: - if kind == 'nfs': - host = hostvars['openshift']['hosted'][component]['storage']['host'] - if host == None: - if len(groups['oo_nfs_to_config']) > 0: - host = groups['oo_nfs_to_config'][0] + if 'hosted' in hostvars['openshift']: + for component in hostvars['openshift']['hosted']: + if 'storage' in hostvars['openshift']['hosted'][component]: + kind = hostvars['openshift']['hosted'][component]['storage']['kind'] + create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] + if kind != None and create_pv: + if kind == 'nfs': + host = hostvars['openshift']['hosted'][component]['storage']['host'] + if host == None: + if len(groups['oo_nfs_to_config']) > 0: + host = groups['oo_nfs_to_config'][0] + else: + raise errors.AnsibleFilterError("|failed no storage host detected") + directory = hostvars['openshift']['hosted'][component]['storage']['nfs']['directory'] + volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] + path = directory + '/' + volume + size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] + access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + persistent_volumes.append(persistent_volume) else: - raise errors.AnsibleFilterError("|failed no storage host detected") - directory = hostvars['openshift']['hosted'][component]['storage']['nfs']['directory'] - volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] - path = directory + '/' + volume - size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] - access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - access_modes=access_modes, - 
storage=dict( - nfs=dict( - server=host, - path=path))) - persistent_volumes.append(persistent_volume) - else: - msg = "|failed invalid storage kind '{0}' for component '{1}'".format( - kind, - component) - raise errors.AnsibleFilterError(msg) + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + component) + raise errors.AnsibleFilterError(msg) return persistent_volumes @staticmethod @@ -632,18 +710,20 @@ class FilterModule(object): if persistent_volume_claims == None: persistent_volume_claims = [] - for component in hostvars['openshift']['hosted']: - kind = hostvars['openshift']['hosted'][component]['storage']['kind'] - create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] - if kind != None and create_pv: - volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] - size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] - access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] - persistent_volume_claim = dict( - name="{0}-claim".format(volume), - capacity=size, - access_modes=access_modes) - persistent_volume_claims.append(persistent_volume_claim) + if 'hosted' in hostvars['openshift']: + for component in hostvars['openshift']['hosted']: + if 'storage' in hostvars['openshift']['hosted'][component]: + kind = hostvars['openshift']['hosted'][component]['storage']['kind'] + create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] + if kind != None and create_pv: + volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] + size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] + access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] + persistent_volume_claim = dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + persistent_volume_claims.append(persistent_volume_claim) return persistent_volume_claims @staticmethod @@ -711,7 +791,7 @@ class FilterModule(object): return retval @staticmethod - def oo_image_tag_to_rpm_version(version): + def oo_image_tag_to_rpm_version(version, include_dash=False): """ Convert an image tag string to an RPM version if necessary Empty strings and strings that are already in rpm version format are ignored. 
@@ -722,7 +802,10 @@ class FilterModule(object): raise errors.AnsibleFilterError("|failed expects a string or unicode") if version.startswith("v"): - version = "-" + version.replace("v", "") + version = version.replace("v", "") + + if include_dash: + version = "-" + version return version @@ -755,4 +838,7 @@ class FilterModule(object): "oo_pods_match_component": self.oo_pods_match_component, "oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars, "oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version, + "oo_merge_dicts": self.oo_merge_dicts, + "oo_oc_nodes_matching_selector": self.oo_oc_nodes_matching_selector, + "oo_oc_nodes_with_label": self.oo_oc_nodes_with_label } diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py index d0fb98ec3..34d9aef75 100644 --- a/filter_plugins/openshift_master.py +++ b/filter_plugins/openshift_master.py @@ -57,7 +57,7 @@ class IdentityProviderBase(object): mapping_method = None for key in mm_keys: if key in self._idp: - mapping_method = self._idp[key] + mapping_method = self._idp.pop(key) if mapping_method is None: mapping_method = self.get_default('mappingMethod') self.mapping_method = mapping_method diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example index 43b646c93..62891e6c3 100644 --- a/inventory/byo/hosts.aep.example +++ b/inventory/byo/hosts.aep.example @@ -58,6 +58,10 @@ deployment_type=atomic-enterprise # See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html #openshift_master_logging_public_url=https://kibana.example.com +# Configure imagePolicyConfig in the master config +# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig +#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} + # Docker Configuration # Add additional, insecure, and blocked registries to global docker configuration # For enterprise deployment types we ensure that registry.access.redhat.com is @@ -173,9 +177,47 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # glusterfs plugin dependencies will be installed, if available. #osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] -# default selectors for router and registry services -# openshift_router_selector='region=infra' -# openshift_registry_selector='region=infra' +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_router_selector='region=infra' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. +#openshift_hosted_router_replicas=2 +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. 
+#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key"} + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_registry_selector='region=infra' # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 8b8dbade0..1f13aade6 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -59,6 +59,10 @@ deployment_type=origin # See: https://docs.openshift.org/latest/install_config/aggregate_logging.html #openshift_master_logging_public_url=https://kibana.example.com +# Configure imagePolicyConfig in the master config +# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig +#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} + # Docker Configuration # Add additional, insecure, and blocked registries to global docker configuration # For enterprise deployment types we ensure that registry.access.redhat.com is @@ -178,9 +182,47 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # glusterfs plugin dependencies will be installed, if available. #osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] -# default selectors for router and registry services -# openshift_router_selector='region=infra' -# openshift_registry_selector='region=infra' +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_router_selector='region=infra' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. +#openshift_hosted_router_replicas=2 +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. +#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key"} + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. 
+# Default value: 'region=infra' +#openshift_registry_selector='region=infra' # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 4c6aae0bd..2386eb236 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -57,6 +57,10 @@ deployment_type=openshift-enterprise # Configure loggingPublicURL in the master config for aggregate logging # See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html #openshift_master_logging_public_url=https://kibana.example.com +# +# Configure imagePolicyConfig in the master config +# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig +#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} # Docker Configuration # Add additional, insecure, and blocked registries to global docker configuration @@ -174,9 +178,47 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # glusterfs plugin dependencies will be installed, if available. #osn_storage_plugin_deps=['ceph','glusterfs'] -# default selectors for router and registry services -# openshift_router_selector='region=infra' -# openshift_registry_selector='region=infra' +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_router_selector='region=infra' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. +#openshift_hosted_router_replicas=2 +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. +#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key"} + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. 
+# Default value: 'region=infra' +#openshift_registry_selector='region=infra' # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 6958c3d4f..98d85ddd9 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.75 +Version: 3.0.81 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -261,6 +261,54 @@ Atomic OpenShift Utilities includes %changelog +* Mon Apr 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.81-1 +- IMAGE_PREFIX=openshift3/ for enterprise logging/metrics (sdodson@redhat.com) +- a-o-i: Don't assume storage on 1st master (smunilla@redhat.com) +- Bug 1320829 - Handle OSE 3.0 installs (bleanhar@redhat.com) + +* Fri Apr 15 2016 Troy Dawson <tdawson@redhat.com> 3.0.80-1 +- Refactor docker failed state cleanup (sdodson@redhat.com) +- Support mixed RPM/container installs (bleanhar@redhat.com) +- The openshift_docker role must set the version facts for containerized + installs (bleanhar@redhat.com) +- start it, check for failure, reset it, start again (sdodson@redhat.com) +- Enable docker before potentially resetting the failure (sdodson@redhat.com) +- Fix mappingMethod option in identity provider. (abutcher@redhat.com) +- Support setting imagePolicyConfig JSON in inventory. (dgoodwin@redhat.com) + +* Tue Apr 12 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.79-1 +- Bug 1324728 - Ansible should not downgrade docker when installing 3.2 + containerized env (bleanhar@redhat.com) +- Fixing non-HA master restart conditional (bleanhar@redhat.com) +- Fetching the current version a little more carefully (bleanhar@redhat.com) +- Make sure Docker is restarted after we have correctly configured the + containerized systemd units (bleanhar@redhat.com) +- use RestartSec to avoid default rate limit in systemd (bleanhar@redhat.com) +- Convert image_tag on masters (smunilla@redhat.com) +- Installs and upgrades from authenticated registries are not supported for now + (bleanhar@redhat.com) +- Handle cases where the pacemaker variables aren't set (bleanhar@redhat.com) +- Containerized installs on RHEL were downgrading docker unnecessarily + (bleanhar@redhat.com) + +* Tue Apr 12 2016 Troy Dawson <tdawson@redhat.com> 3.0.78-1 +- Add support for creating secure router. (abutcher@redhat.com) + +* Mon Apr 11 2016 Troy Dawson <tdawson@redhat.com> 3.0.77-1 +- Fix a docker-storage sysconfig bug. (dgoodwin@redhat.com) +- update bootstrap-fedora to include python2-firewall for F24+ + (maxamillion@fedoraproject.org) +- Merge openshift_env hostvars. (abutcher@redhat.com) +- Add openshift_hosted_facts role and remove hosted facts from + openshift_common. (abutcher@redhat.com) + +* Fri Apr 08 2016 Troy Dawson <tdawson@redhat.com> 3.0.76-1 +- a-o-i: Support openshift_image_tag (smunilla@redhat.com) +- Bug 1324729 - Import xPaas image streams failed during 3.2 installation + (bleanhar@redhat.com) +- Test docker_version_result.stdout when determining if docker should be + installed/downgraded. 
(abutcher@redhat.com) + * Thu Apr 07 2016 Troy Dawson <tdawson@redhat.com> 3.0.75-1 - First attempt at oadm router module (kwoodson@redhat.com) - Remove openshift_common dep from openshift_storage_nfs (abutcher@redhat.com) diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml index 471c41f16..b380a74d6 100644 --- a/playbooks/adhoc/bootstrap-fedora.yml +++ b/playbooks/adhoc/bootstrap-fedora.yml @@ -1,4 +1,4 @@ - hosts: OSEv3 tasks: - name: install python and deps for ansible modules - raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python + raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python python2-firewall diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml index 1ac78468a..44bf962c9 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -49,8 +49,6 @@ openshift_serviceaccounts_namespace: default openshift_serviceaccounts_sccs: - privileged - - role: openshift_router - when: deploy_infra | bool - role: openshift_registry registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim" when: deploy_infra | bool and attach_registry_volume | bool diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 2411e7360..6f908fa7f 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -34,3 +34,5 @@ - include: additional_config.yml - include: ../openshift-node/config.yml + +- include: openshift_hosted.yml diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml new file mode 100644 index 000000000..1cbc0f544 --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -0,0 +1,5 @@ +- name: Create Hosted Resources + hosts: oo_first_master + roles: + - role: openshift_hosted + openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh index 7a1edf38f..96944a78b 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh +++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh @@ -3,19 +3,19 @@ # Here we don't really care if this is a master, api, controller or node image. # We just need to know the version of one of them. 
unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1) -installed_container_name=$(basename -s .service ${unit_file}) -installed=$(docker exec ${installed_container_name} openshift version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') if [ ${1} == "origin" ]; then image_name="openshift/origin" elif grep aep $unit_file 2>&1 > /dev/null; then - image_name="aep3/aep" + image_name="aep3/node" elif grep openshift3 $unit_file 2>&1 > /dev/null; then - image_name="openshift3/ose" + image_name="openshift3/node" fi +installed=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') + docker pull ${image_name} 2>&1 > /dev/null -available=$(docker run --rm ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') +available=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') echo "---" echo "curr_version: ${installed}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml index 6e6f31195..21480ba55 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml @@ -38,7 +38,7 @@ msg: > This upgrade does not support Pacemaker: https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html - when: openshift.master.cluster_method == 'pacemaker' + when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker' - fail: msg: > @@ -57,6 +57,11 @@ roles: - openshift_facts tasks: + - openshift_facts: + role: master + local_facts: + ha: "{{ groups.oo_masters_to_config | length > 1 }}" + - name: Ensure Master is running service: name: "{{ openshift.common.service_type }}-master" @@ -77,11 +82,6 @@ state: started enabled: yes when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool - post_tasks: - - openshift_facts: - role: master - local_facts: - ha: "{{ groups.oo_masters_to_config | length > 1 }}" - name: Verify upgrade can proceed hosts: oo_nodes_to_config @@ -105,8 +105,12 @@ - include: ../../../../../roles/openshift_master/handlers/main.yml - include: ../../../../../roles/openshift_node/handlers/main.yml roles: + # We want the cli role to evaluate so that the containerized oc/oadm wrappers + # are modified to use the correct image tag. However, this can trigger a + # docker restart if new configuration is laid down which would immediately + # pull the latest image and defeat the purpose of these tasks. 
- openshift_cli - tasks: + pre_tasks: - name: Clean package cache command: "{{ ansible_pkg_mgr }} clean all" when: not openshift.common.is_atomic | bool @@ -147,6 +151,10 @@ - fail: msg: Verifying the correct version was found + when: g_aos_versions.curr_version == "" + + - fail: + msg: Verifying the correct version was found when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version - include_vars: ../../../../../roles/openshift_master/vars/main.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml index 041ad5a9a..8852bb8de 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml @@ -1,33 +1,4 @@ --- -# This is a workaround for authenticated registries -- name: Download new images - hosts: oo_nodes_to_config - roles: - - openshift_facts - tasks: - - name: Pull Images - command: > - docker pull {{ item }}:v{{ g_new_version }} - with_items: - - "{{ openshift.node.node_image }}" - - "{{ openshift.node.ovs_image }}" - - "{{ openshift.common.pod_image }}" - - "{{ openshift.common.router_image }}" - - "{{ openshift.common.registry_image }}" - - "{{ openshift.common.deployer_image }}" - -# This is a workaround for authenticated registries -- name: Download new images - hosts: oo_masters_to_config - roles: - - openshift_facts - tasks: - - name: Pull Images - command: > - docker pull {{ item }}:v{{ g_new_version }} - with_items: - - "{{ openshift.master.master_image }}" - ############################################################################### # The restart playbook should be run after this playbook completes. ############################################################################### @@ -39,6 +10,29 @@ tasks: - include: docker_upgrade.yml when: not openshift.common.is_atomic | bool + - name: Set post docker install facts + openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: docker + local_facts: + openshift_image_tag: "v{{ g_new_version }}" + openshift_version: "{{ g_new_version }}" + +# The cli image is used by openshift_docker_facts to determine the currently installed +# version. We need to explicitly pull the latest image to handle cases where +# the locally cached 'latest' tag is older the g_new_version. 
+- name: Download cli image + hosts: oo_masters_to_config:oo_nodes_to_config + roles: + - { role: openshift_docker_facts } + tasks: + - name: Pull Images + command: > + docker pull {{ item }}:latest + with_items: + - "{{ openshift.common.cli_image }}" ############################################################################### # Upgrade Masters @@ -139,7 +133,7 @@ - name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints hosts: oo_masters_to_config roles: - - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" } + - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" } vars: origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}" ent_reconcile_bindings: true diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 2d207a1a9..878d5fea8 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -7,25 +7,35 @@ register: docker_version_result changed_when: false +- stat: path=/etc/sysconfig/docker-storage + register: docker_storage_check + +- name: Remove deferred deletion for downgrades from 1.9 + command: > + sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage + when: docker_storage_check.stat.exists | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<') + - name: Downgrade docker if necessary command: "{{ ansible_pkg_mgr }} downgrade -y docker-{{ docker_version }}" register: docker_downgrade_result when: not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'gt') - name: Install docker - action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version != '' else '' }} state=present" - when: not openshift.common.is_atomic | bool and not docker_downgrade_result | changed + action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present" + when: not openshift.common.is_atomic | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'lt') -- stat: path=/etc/sysconfig/docker - register: docker_check - when: docker_downgrade_result | changed +# If docker were enabled and started before we downgraded it may have entered a +# failed state. Check for that and clear it if necessary. 
+- name: Check that docker hasn't entered failed state + command: systemctl show docker + register: docker_state + changed_when: False -- name: Remove deferred deletion for downgrades from 1.9 - command: > - sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage - when: docker_downgrade_result | changed and docker_check.stat.exists | bool and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<') +- name: Reset docker service state + command: systemctl reset-failed docker.service + when: " 'ActiveState=failed' in docker_state.stdout " -- name: enable and start the docker service +- name: Start the docker service service: name: docker enabled: yes diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index a71b36237..36906b347 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -16,5 +16,6 @@ galaxy_info: - cloud - system dependencies: +- { role: openshift_docker } - { role: os_firewall } - { role: etcd_common } diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index b8dbefa64..cf957ede8 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -11,6 +11,7 @@ ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/v ExecStop=/usr/bin/docker stop {{ etcd_service }} SyslogIdentifier=etcd_container Restart=always +RestartSec=5s [Install] WantedBy=docker.service diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml index 4d0c87497..7baa87ab8 100644 --- a/roles/openshift_cli/defaults/main.yml +++ b/roles/openshift_cli/defaults/main.yml @@ -1,2 +1,2 @@ --- -openshift_version: "{{ openshift_image_tag | default(openshift.common.image_tag) | default('') }}" +openshift_version: "{{ openshift_image_tag | default(openshift.docker.openshift_image_tag | default('')) }}" diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml index 471fd686b..d7442924f 100644 --- a/roles/openshift_cloud_provider/tasks/main.yml +++ b/roles/openshift_cloud_provider/tasks/main.yml @@ -2,14 +2,12 @@ - name: Set cloud provider facts openshift_facts: role: cloudprovider - openshift_env: "{{ item | oo_openshift_env }}" + openshift_env: "{{ hostvars[inventory_hostname] + | oo_merge_dicts(hostvars) + | oo_openshift_env }}" openshift_env_structures: - 'openshift.cloudprovider.aws.*' - 'openshift.cloudprovider.openstack.*' - no_log: true - with_items: - - "{{ hostvars[inventory_hostname] }}" - - "{{ hostvars }}" - name: Create cloudprovider config dir file: diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index b6074ff64..eda43b9f8 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -33,15 +33,11 @@ # versions or image tags. openshift_common's usage requires that it be a RPM # version and openshift_cli expects it to be an image tag. 
- name: Install the base package for versioning - action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') | oo_image_tag_to_rpm_version }} state=present" + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present" when: not openshift.common.is_containerized | bool -# This invocation also updates the version facts which are necessary -# for setting the hostname below. -- name: openshift_facts +- name: Set version facts openshift_facts: - role: hosted - openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}" # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the # hostname by default. diff --git a/roles/openshift_docker/tasks/main.yml b/roles/openshift_docker/tasks/main.yml new file mode 100644 index 000000000..23613b762 --- /dev/null +++ b/roles/openshift_docker/tasks/main.yml @@ -0,0 +1,28 @@ +--- +# It's important that we don't explicitly pull this image here. Otherwise we +# could result in upgrading a preinstalled environment. We'll have to set +# openshift_image_tag correctly for upgrades. +- name: Set version when containerized + command: > + docker run --rm {{ openshift.common.cli_image }}:latest version + register: cli_image_version + when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool and openshift_image_tag is not defined + +- set_fact: + l_image_tag: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0] }}" + when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool and openshift_image_tag is not defined + +- set_fact: + l_image_tag: "{{ openshift_image_tag }}" + when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool and openshift_image_tag is defined + +- name: Set post docker install facts + openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: docker + local_facts: + openshift_image_tag: "{{ l_image_tag }}" + openshift_version: "{{ l_image_tag if l_image_tag is defined else '' | oo_image_tag_to_rpm_version }}" + when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool diff --git a/roles/openshift_docker_facts/defaults/main.yml b/roles/openshift_docker_facts/defaults/main.yml new file mode 100644 index 000000000..7baa87ab8 --- /dev/null +++ b/roles/openshift_docker_facts/defaults/main.yml @@ -0,0 +1,2 @@ +--- +openshift_version: "{{ openshift_image_tag | default(openshift.docker.openshift_image_tag | default('')) }}" diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index eb762e33f..89393168b 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -46,10 +46,24 @@ register: common_version failed_when: false changed_when: false - when: not openshift.common.is_atomic | bool + when: not openshift.common.is_containerized | bool + +- set_fact: + l_common_version: "{{ openshift_version | default('0.0', True) | oo_image_tag_to_rpm_version }}" + when: openshift.common.is_containerized | bool + +- set_fact: + l_common_version: "{{ common_version.stdout | default('0.0', True) }}" + when: not openshift.common.is_containerized | bool - name: Set docker version to be installed set_fact: docker_version: "{{ '1.8.2' }}" - when: " ( common_version.stdout | 
default('0.0', True) | version_compare('3.2','<') and openshift.common.service_type == 'atomic-openshift' ) or - ( common_version.stdout | default('0.0', True) | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )" + when: " ( l_common_version | version_compare('3.2','<') and openshift.common.service_type in ['openshift', 'atomic-openshift'] ) or + ( l_common_version | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )" + +- name: Set docker version to be installed + set_fact: + docker_version: "{{ '1.9.1' }}" + when: " ( l_common_version | version_compare('3.2','>') and openshift.common.service_type == 'atomic-openshift' ) or + ( l_common_version | version_compare('1.2','>') and openshift.common.service_type == 'origin' )" diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml index e8e8713be..67e49f327 100644 --- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml @@ -79,7 +79,7 @@ parameters: - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' name: IMAGE_PREFIX - value: "openshift/origin-" + value: "registry.access.redhat.com/openshift3/" - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 32e608e86..9218e12ae 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -63,7 +63,16 @@ def migrate_local_facts(facts): migrated_facts = copy.deepcopy(facts) return migrate_docker_facts(migrated_facts) - +def migrate_hosted_facts(facts): + """ Apply migrations for master facts """ + if 'master' in facts: + if 'router_selector' in facts['master']: + if 'hosted' not in facts: + facts['hosted'] = {} + if 'router' not in facts['hosted']: + facts['hosted']['router'] = {} + facts['hosted']['router']['selector'] = facts['master'].pop('router_selector') + return facts def first_ip(network): """ Return the first IPv4 address in network @@ -394,7 +403,7 @@ def set_node_schedulability(facts): facts['node']['schedulable'] = True return facts -def set_master_selectors(facts): +def set_selectors(facts): """ Set selectors facts if not already present in facts dict Args: facts (dict): existing facts @@ -403,16 +412,21 @@ def set_master_selectors(facts): facts if they were not already present """ + deployment_type = facts['common']['deployment_type'] + if deployment_type == 'online': + selector = "type=infra" + else: + selector = "region=infra" + + if 'hosted' not in facts: + facts['hosted'] = {} + if 'router' not in facts['hosted']: + facts['hosted']['router'] = {} + if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']: + facts['hosted']['router']['selector'] = selector + if 'master' in facts: if 'infra_nodes' in facts['master']: - deployment_type = facts['common']['deployment_type'] - if deployment_type == 'online': - selector = "type=infra" - else: - selector = "region=infra" - - if 'router_selector' not in 
facts['master']: - facts['master']['router_selector'] = selector if 'registry_selector' not in facts['master']: facts['master']['registry_selector'] = selector return facts @@ -1034,7 +1048,7 @@ def get_docker_version_info(): } return result -def get_openshift_version(facts, cli_image=None): +def get_openshift_version(facts): """ Get current version of openshift on the host Args: @@ -1056,29 +1070,14 @@ def get_openshift_version(facts, cli_image=None): _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) version = parse_openshift_version(output) + # openshift_facts runs before openshift_docker_facts. However, it will be + # called again and set properly throughout the playbook run. This could be + # refactored to simply set the openshift.common.version in the + # openshift_docker_facts role but it would take reworking some assumptions + # on how get_openshift_version is called. if 'is_containerized' in facts['common'] and safe_get_bool(facts['common']['is_containerized']): - container = None - if 'master' in facts: - if 'cluster_method' in facts['master']: - container = facts['common']['service_type'] + '-master-api' - else: - container = facts['common']['service_type'] + '-master' - elif 'node' in facts: - container = facts['common']['service_type'] + '-node' - - if container is not None: - exit_code, output, _ = module.run_command(['docker', 'exec', container, 'openshift', 'version']) - # if for some reason the container is installed but not running - # we'll fall back to using docker run later in this method. - if exit_code == 0: - version = parse_openshift_version(output) - - if version is None and cli_image is not None: - # Assume we haven't installed the environment yet and we need - # to query the latest image, but only if docker is installed - if 'docker' in facts and 'version' in facts['docker']: - exit_code, output, _ = module.run_command(['docker', 'run', '--rm', cli_image, 'version']) - version = parse_openshift_version(output) + if 'docker' in facts and 'openshift_version' in facts['docker']: + version = facts['docker']['openshift_version'] return version @@ -1142,17 +1141,23 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw protected_facts = ['ha', 'master_count'] # Facts we do not ever want to merge. These originate in inventory variables - # and typically contain JSON dicts. We don't ever want to trigger a merge + # and contain JSON dicts. We don't ever want to trigger a merge # here, just completely overwrite with the new if they are present there. - overwrite_facts = ['admission_plugin_config', - 'kube_admission_plugin_config'] + inventory_json_facts = ['admission_plugin_config', + 'kube_admission_plugin_config', + 'image_policy_config'] facts = dict() for key, value in orig.iteritems(): # Key exists in both old and new facts. if key in new: - if key in overwrite_facts: - facts[key] = copy.deepcopy(new[key]) + if key in inventory_json_facts: + # Watchout for JSON facts that sometimes load as strings. + # (can happen if the JSON contains a boolean) + if isinstance(new[key], str): + facts[key] = yaml.safe_load(new[key]) + else: + facts[key] = copy.deepcopy(new[key]) # Continue to recurse if old and new fact is a dictionary. 
elif isinstance(value, dict) and isinstance(new[key], dict): # Collect the subset of additive facts to overwrite if @@ -1336,10 +1341,6 @@ def set_container_facts_if_unset(facts): if safe_get_bool(facts['common']['is_containerized']): facts['common']['admin_binary'] = '/usr/local/bin/oadm' facts['common']['client_binary'] = '/usr/local/bin/oc' - openshift_version = get_openshift_version(facts, cli_image) - if openshift_version is not None: - base_version = openshift_version.split('-')[0] - facts['common']['image_tag'] = "v" + base_version return facts @@ -1479,7 +1480,7 @@ class OpenShiftFacts(object): facts = set_flannel_facts_if_unset(facts) facts = set_nuage_facts_if_unset(facts) facts = set_node_schedulability(facts) - facts = set_master_selectors(facts) + facts = set_selectors(facts) facts = set_metrics_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) facts = set_sdn_facts_if_unset(facts, self.system_facts) @@ -1573,23 +1574,25 @@ class OpenShiftFacts(object): if 'cloudprovider' in roles: defaults['cloudprovider'] = dict(kind=None) - defaults['hosted'] = dict( - registry=dict( - storage=dict( - kind=None, - volume=dict( - name='registry', - size='5Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)'), - host=None, - access_modes=['ReadWriteMany'], - create_pv=True - ) + if 'hosted' in roles or self.role == 'hosted': + defaults['hosted'] = dict( + registry=dict( + storage=dict( + kind=None, + volume=dict( + name='registry', + size='5Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)'), + host=None, + access_modes=['ReadWriteMany'], + create_pv=True + ) + ), + router=dict() ) - ) return defaults diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md new file mode 100644 index 000000000..633ec0937 --- /dev/null +++ b/roles/openshift_hosted/README.md @@ -0,0 +1,55 @@ +OpenShift Hosted +================ + +OpenShift Hosted Resources + +* OpenShift Router + +Requirements +------------ + +This role requires a running OpenShift cluster with nodes labeled to +match the openshift_hosted_router_selector (default: region=infra). + +Role Variables +-------------- + +From this role: + +| Name | Default value | Description | +|-------------------------------------|------------------------------------------|----------------------------------------------------------------------------------------------------------------------| +| openshift_hosted_router_certificate | None | Dictionary containing "certfile" and "keyfile" keys with values containing paths to local certificate files. | +| openshift_hosted_router_registryurl | 'openshift3/ose-${component}:${version}' | The image to base the OpenShift router on. | +| openshift_hosted_router_replicas | Number of nodes matching selector | The number of replicas to configure. | +| openshift_hosted_router_selector | region=infra | Node selector used when creating router. The OpenShift router will only be deployed to nodes matching this selector. 
| + +Dependencies +------------ + +* openshift_common +* openshift_hosted_facts + +Example Playbook +---------------- + +``` +- name: Create hosted resources + hosts: oo_first_master + roles: + - role: openshift_hosted + openshift_hosted_router_certificate: + certfile: /path/to/my-router.crt + keyfile: /path/to/my-router.key + openshift_hosted_router_registryurl: 'registry.access.redhat.com/openshift3/ose-haproxy-router:v3.0.2.0' + openshift_hosted_router_selector: 'type=infra' +``` + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Red Hat openshift@redhat.com diff --git a/roles/openshift_router/handlers/main.yml b/roles/openshift_hosted/handlers/main.yml index e69de29bb..e69de29bb 100644 --- a/roles/openshift_router/handlers/main.yml +++ b/roles/openshift_hosted/handlers/main.yml diff --git a/roles/openshift_router/meta/main.yml b/roles/openshift_hosted/meta/main.yml index c2b0777b5..75dfc24c3 100644 --- a/roles/openshift_router/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -11,5 +11,6 @@ galaxy_info: - 7 categories: - cloud - dependencies: - - openshift_facts +dependencies: +- openshift_common +- openshift_hosted_facts diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml new file mode 100644 index 000000000..d42a4e365 --- /dev/null +++ b/roles/openshift_hosted/tasks/main.yml @@ -0,0 +1,3 @@ +--- + +- include: router.yml diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml new file mode 100644 index 000000000..6a36f74b2 --- /dev/null +++ b/roles/openshift_hosted/tasks/router.yml @@ -0,0 +1,64 @@ +--- +- fail: + msg: "Both 'certfile' and 'keyfile' keys must be specified when supplying the openshift_hosted_router_certificate variable." 
+ when: openshift_hosted_router_certificate is defined and ('certfile' not in openshift_hosted_router_certificate or 'keyfile' not in openshift_hosted_router_certificate) + +- name: Read router certificate and key + slurp: + src: "{{ item }}" + register: openshift_router_certificate_output + with_items: + - "{{ openshift_hosted_router_certificate.certfile }}" + - "{{ openshift_hosted_router_certificate.keyfile }}" + delegate_to: localhost + when: openshift_hosted_router_certificate is defined + +- name: Persist certificate contents + openshift_facts: + role: hosted + openshift_env: + openshift_hosted_router_certificate_contents: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}" + when: openshift_hosted_router_certificate is defined + +- name: Create PEM certificate + copy: + content: "{{ openshift.hosted.router.certificate.contents }}" + dest: "{{ openshift_master_config_dir }}/openshift-router.pem" + mode: 0600 + when: openshift.hosted.router.certificate | default(None) != None + +- name: Retrieve list of openshift nodes + command: > + {{ openshift.common.client_binary }} --api-version='v1' -o json + get nodes -n default --config={{ openshift.common.config_base }}/master/admin.kubeconfig + register: openshift_hosted_router_nodes_json + when: openshift.hosted.router.replicas | default(None) == None + +- name: Collect nodes matching router selector + set_fact: + openshift_hosted_router_nodes: > + {{ (openshift_hosted_router_nodes_json.stdout|from_json)['items'] + | oo_oc_nodes_matching_selector(openshift.hosted.router.selector) }} + when: openshift.hosted.router.replicas | default(None) == None + +- name: Create OpenShift router + command: > + {{ openshift.common.admin_binary }} router --create + {% if openshift.hosted.router.replicas | default(None) != None -%} + --replicas={{ openshift.hosted.router.replicas }} + {% else -%} + --replicas={{ openshift_hosted_router_nodes | length }} + {% endif %} + {% if openshift.hosted.router.certificate | default(None) != None -%} + --default-cert={{ openshift_master_config_dir }}/openshift-router.pem + {% endif -%} + --namespace=default + --service-account=router + --selector='{{ openshift.hosted.router.selector }}' + --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig + {% if openshift.hosted.router.registryurl | default(None)!= None -%} + --images='{{ openshift.hosted.router.registryurl }}' + {% endif -%} + register: openshift_hosted_router_results + changed_when: "'service exists' not in openshift_hosted_router_results.stdout" + when: openshift.hosted.router.replicas | default(None) != None or (openshift_hosted_router_nodes is defined and openshift_hosted_router_nodes | length > 0) diff --git a/roles/openshift_hosted/vars/main.yml b/roles/openshift_hosted/vars/main.yml new file mode 100644 index 000000000..9967e26f4 --- /dev/null +++ b/roles/openshift_hosted/vars/main.yml @@ -0,0 +1,2 @@ +--- +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" diff --git a/roles/openshift_hosted_facts/meta/main.yml b/roles/openshift_hosted_facts/meta/main.yml new file mode 100644 index 000000000..dd2de07bc --- /dev/null +++ b/roles/openshift_hosted_facts/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Andrew Butcher + description: OpenShift Hosted Facts + company: Red Hat, Inc. 
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.9
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
new file mode 100644
index 000000000..f595e1e81
--- /dev/null
+++ b/roles/openshift_hosted_facts/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+- name: Set hosted facts
+  openshift_facts:
+    role: hosted
+    openshift_env: "{{ hostvars[inventory_hostname]
+                       | oo_merge_dicts(hostvars)
+                       | oo_openshift_env }}"
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index afc81a414..6bf28ff2b 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -29,4 +29,4 @@ os_firewall_deny:
 - service: former etcd peer port
   port: 7001/tcp
-openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag) | default(openshift.common.image_tag) | default('') }}"
+openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag | default(openshift.docker.openshift_image_tag | default(''))) }}"
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 70c7ef4e4..f7dfb11f7 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,17 +1,17 @@
 ---
 - name: restart master
   service: name={{ openshift.common.service_type }}-master state=restarted
-  when: (openshift.master.ha is defined and not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+  when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
   notify: Verify API Server

 - name: restart master api
   service: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
   notify: Verify API Server

 - name: restart master controllers
   service: name={{ openshift.common.service_type }}-master-controllers state=restarted
-  when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'

 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 7ab913eea..0d4241e2c 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -16,3 +16,4 @@ dependencies:
 - role: openshift_cli
 - role: openshift_cloud_provider
 - role: openshift_master_facts
+- role: openshift_hosted_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 23994cdcf..18a42bf93 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -24,7 +24,7 @@
   when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and openshift.common.is_containerized | bool

 - name: Install Master package
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: not openshift.common.is_containerized | bool

 - name: Pull master image
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 66d76978e..df1dbb85e 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -20,6 +20,7 @@ LimitCORE=infinity
 WorkingDirectory={{ openshift.common.data_dir }}
 SyslogIdentifier={{ openshift.common.service_type }}-master-api
 Restart=always
+RestartSec=5s

 [Install]
 WantedBy=docker.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 75759c133..5ff2edae4 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -19,6 +19,7 @@ LimitCORE=infinity
 WorkingDirectory={{ openshift.common.data_dir }}
 SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
 Restart=on-failure
+RestartSec=5s

 [Install]
 WantedBy=docker.service
diff --git a/roles/openshift_master/templates/docker/master.docker.service.j2 b/roles/openshift_master/templates/docker/master.docker.service.j2
index d02fc5342..97f698b68 100644
--- a/roles/openshift_master/templates/docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/docker/master.docker.service.j2
@@ -12,6 +12,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.c
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
 Restart=always
+RestartSec=5s

 [Install]
 WantedBy=docker.service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index e89fdc0ce..1009aa318 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -89,6 +89,9 @@ etcdStorageConfig:
 imageConfig:
   format: {{ openshift.master.registry_url }}
   latest: false
+{% if 'image_policy_config' in openshift.master %}
+imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }}
+{% endif %}
 kind: MasterConfig
 kubeletClientInfo:
 {# TODO: allow user specified kubelet port #}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index e61418eb6..1f50fdce1 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -15,6 +15,7 @@ LimitNOFILE=131072
 LimitCORE=infinity
 WorkingDirectory={{ openshift.common.data_dir }}
 SyslogIdentifier=atomic-openshift-master-api
+RestartSec=5s

 [Install]
 WantedBy=multi-user.target
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index ba4c1e0e8..bf62696f0 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -20,6 +20,7 @@ LimitCORE=infinity
 WorkingDirectory={{ openshift.common.data_dir }}
 SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
 Restart=on-failure
+RestartSec=5s

 [Install]
 WantedBy=multi-user.target
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index f43b8c59d..4d7c04065 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -72,3 +72,4 @@
       oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
       oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
       oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
+      image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}"
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index c4f718bfb..91aed7aa3 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -13,4 +13,4 @@ os_firewall_allow:
 - service: OpenShift OVS sdn
   port: 4789/udp
   when: openshift.node.use_openshift_sdn | bool
-openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag) | default(openshift.common.image_tag) | default('') }}"
+openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag | default(openshift.docker.openshift_image_tag | default(''))) }}"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index eca4848c1..8987e0191 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -36,11 +36,11 @@
 # We have to add tuned-profiles in the same transaction otherwise we run into depsolving
 # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
 - name: Install Node package
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: not openshift.common.is_containerized | bool

 - name: Install sdn-ovs package
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool

 - name: Pull node image
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index ff5a97fe0..30f09b250 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -17,6 +17,7 @@ ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 SyslogIdentifier={{ openshift.common.service_type }}-node
 Restart=always
+RestartSec=5s

 [Install]
 WantedBy=docker.service
diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service
index 8052a3a39..1e1f8967d 100644
--- a/roles/openshift_node/templates/openvswitch.docker.service
+++ b/roles/openshift_node/templates/openvswitch.docker.service
@@ -11,6 +11,7 @@ ExecStartPost=/usr/bin/sleep 5
 ExecStop=/usr/bin/docker stop openvswitch
 SyslogIdentifier=openvswitch
 Restart=always
+RestartSec=5s

 [Install]
 WantedBy=docker.service
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index d9f6fc01a..25e5a38dd 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -10,4 +10,5 @@ galaxy_info:
     versions:
     - 7
 dependencies:
-- { role: openshift_common }
+- role: openshift_common
+- role: openshift_hosted_facts
diff --git a/roles/openshift_registry/meta/main.yml b/roles/openshift_registry/meta/main.yml
index b220a020e..e6db8c537 100644
--- a/roles/openshift_registry/meta/main.yml
+++ b/roles/openshift_registry/meta/main.yml
@@ -11,5 +11,5 @@ galaxy_info:
     - 7
   categories:
   - cloud
- dependencies:
- - openshift_facts
+dependencies:
+- role: openshift_hosted_facts
diff --git a/roles/openshift_router/README.md b/roles/openshift_router/README.md
deleted file mode 100644
index d490e1038..000000000
--- a/roles/openshift_router/README.md
+++ /dev/null
@@ -1,35 +0,0 @@
-OpenShift Container Router
-==========================
-
-OpenShift Router service installation
-
-Requirements
-------------
-
-Running OpenShift cluster
-
-Role Variables
---------------
-
-From this role:
-| Name | Default value | |
-|--------------------|-------------------------------------------------------|---------------------|
-| | | |
-
-Dependencies
-------------
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Red Hat openshift@redhat.com
diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml
deleted file mode 100644
index 40365d04d..000000000
--- a/roles/openshift_router/tasks/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: Deploy OpenShift Router
-  command: >
-    {{ openshift.common.admin_binary }} router
-    --create --replicas={{ openshift.master.infra_nodes | length }}
-    --namespace=default
-    --service-account=router {{ ortr_selector }}
-    --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ ortr_images }}
-  register: ortr_results
-  changed_when: "'service exists' not in ortr_results.stdout"
diff --git a/roles/openshift_router/vars/main.yml b/roles/openshift_router/vars/main.yml
deleted file mode 100644
index bcac12068..000000000
--- a/roles/openshift_router/vars/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-ortr_images: "--images='{{ openshift.master.registry_url }}'"
-ortr_selector: "--selector='{{ openshift.master.router_selector }}'"
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index c6815d697..d675e0750 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -10,6 +10,6 @@ galaxy_info:
     versions:
     - 7
 dependencies:
-- { role: os_firewall }
-- { role: openshift_facts }
-- { role: openshift_repos }
+- role: os_firewall
+- role: openshift_hosted_facts
+- role: openshift_repos
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index dd9d517f1..aaf3b7972 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -163,11 +163,10 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
         if masters_set or num_masters != 2:
             more_hosts = click.confirm('Do you want to add additional hosts?')

-    if num_masters == 1:
-        master = next((host for host in hosts if host.master), None)
-        master.storage = True
-    elif num_masters >= 3:
+    if num_masters >= 3:
         collect_master_lb(hosts)
+
+    if not existing_env:
         collect_storage_host(hosts)

     return hosts
@@ -306,12 +305,17 @@ def collect_storage_host(hosts):
     message = """
 Setting up High Availability Masters requires a storage host. Please provide a
 host that will be configured as a Registry Storage.
+
+Note: Containerized storage hosts are not currently supported.
 """
     click.echo(message)
     host_props = {}

+    first_master = next(host for host in hosts if host.master)
+
     hostname_or_ip = click.prompt('Enter hostname or IP address',
-                                  value_proc=validate_prompt_hostname)
+                                  value_proc=validate_prompt_hostname,
+                                  default=first_master)
     existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
     if existing and existing_host.node:
         existing_host.storage = True
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 28b157e8e..cf2ca51ca 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -130,7 +130,9 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy):
     if CFG.settings.get('master_routingconfig_subdomain', False):
         base_inventory.write(
             "openshift_master_default_subdomain={}\n".format(CFG.settings['master_routingconfig_subdomain']))
-
+    if CFG.settings.get('variant_version', None) == '3.1':
+        #base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
+        base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6'))


 def write_host(host, inventory, schedulable=None):
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 524df08c4..66ed66660 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -37,6 +37,14 @@ MOCK_FACTS = {
             'public_hostname': 'node2.example.com'
         }
     },
+    '10.1.0.1': {
+        'common': {
+            'ip': '10.1.0.1',
+            'public_ip': '10.1.0.1',
+            'hostname': 'storage-private.example.com',
+            'public_hostname': 'storage.example.com'
+        }
+    },
 }

 MOCK_FACTS_QUICKHA = {
@@ -250,6 +258,12 @@ hosts:
     public_hostname: node2.example.com
     node: true
     master: true
+  - connect_to: 10.1.0.1
+    ip: 10.1.0.1
+    hostname: storage-private.example.com
+    public_ip: 24.222.0.6
+    public_hostname: storage.example.com
+    storage: true
 """

 QUICKHA_CONFIG_PRECONFIGURED_LB = """
@@ -720,17 +734,18 @@ class AttendedCliTests(OOCliFixture):
             ('10.0.0.3', False, False)],
                                 ssh_user='root',
                                 variant_num=1,
-                                confirm_facts='y')
+                                confirm_facts='y',
+                                storage='10.1.0.1',)
         self.cli_args.append("install")
         result = self.runner.invoke(cli.cli, self.cli_args,
                                     input=cli_input)
         self.assert_result(result, 0)

         self._verify_load_facts(load_facts_mock)
-        self._verify_run_playbook(run_playbook_mock, 3, 3)
+        self._verify_run_playbook(run_playbook_mock, 4, 4)

         written_config = read_yaml(self.config_file)
-        self._verify_config_hosts(written_config, 3)
+        self._verify_config_hosts(written_config, 4)

         inventory = ConfigParser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))
@@ -762,7 +777,8 @@ class AttendedCliTests(OOCliFixture):
                                 add_nodes=[('10.0.0.3', False, False)],
                                 ssh_user='root',
                                 variant_num=1,
-                                confirm_facts='y')
+                                confirm_facts='y',
+                                storage='10.0.0.1',)
         self.cli_args.append("install")

         result = self.runner.invoke(cli.cli, self.cli_args,
@@ -813,7 +829,8 @@ class AttendedCliTests(OOCliFixture):
                                 ssh_user='root',
                                 variant_num=1,
                                 schedulable_masters_ok=True,
-                                confirm_facts='y')
+                                confirm_facts='y',
+                                storage='10.0.0.1',)
         self._verify_get_hosts_to_run_on(mock_facts,
                                          load_facts_mock,
                                          run_playbook_mock,
@@ -970,7 +987,8 @@ class AttendedCliTests(OOCliFixture):
             ('10.0.0.1', True, False)],
                                 ssh_user='root',
                                 variant_num=1,
-                                confirm_facts='y')
+                                confirm_facts='y',
+                                storage='10.0.0.1')

         self.cli_args.append("install")
         result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input)
@@ -998,7 +1016,8 @@
             ('10.0.0.1', True, False)],
                                 ssh_user='root',
                                 variant_num=3,
-                                confirm_facts='y')
+                                confirm_facts='y',
+                                storage='10.1.0.1',)

         self.cli_args.append("install")
         result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input)
@@ -1019,7 +1038,8 @@ class AttendedCliTests(OOCliFixture):
             ('10.0.0.3', False, False)],
                                 ssh_user='root',
                                 variant_num=1,
-                                confirm_facts='y')
+                                confirm_facts='y',
+                                storage='10.1.0.1',)
         self.cli_args.append("install")
         self.cli_args.append("--gen-inventory")
         result = self.runner.invoke(cli.cli, self.cli_args,
@@ -1032,7 +1052,7 @@ class AttendedCliTests(OOCliFixture):
         self.assertEquals(0, len(run_playbook_mock.mock_calls))

         written_config = read_yaml(self.config_file)
-        self._verify_config_hosts(written_config, 3)
+        self._verify_config_hosts(written_config, 4)

         inventory = ConfigParser.ConfigParser(allow_no_value=True)
         inventory.read(os.path.join(self.work_dir, 'hosts'))