44 files changed, 591 insertions, 219 deletions
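Before the per-file hunks: the first change to filter_plugins/oo_filters.py adds oo_merge_dicts plus two node-selector filters. As a quick orientation, the standalone Python sketch below reproduces their documented behavior using the inputs from the new docstrings; it is illustrative only and is not the filter module itself.

```python
# Standalone sketch of the new oo_filters.py helpers (illustrative only, not the module).

def oo_merge_dicts(first_dict, second_dict):
    """Merge two dicts; values from second_dict win on key collisions."""
    merged = first_dict.copy()
    merged.update(second_dict)
    return merged


def oo_oc_nodes_matching_selector(nodes, selector):
    """Return the names of nodes whose labels satisfy a 'label=value' selector."""
    label = selector.split('=')[0]
    value = selector.split('=')[1]
    return [node['metadata']['name'] for node in nodes
            if node['metadata']['labels'].get(label) == value]


nodes = [
    {"kind": "Node", "metadata": {"name": "node1.example.com",
                                  "labels": {"kubernetes.io/hostname": "node1.example.com",
                                             "color": "green"}}},
    {"kind": "Node", "metadata": {"name": "node2.example.com",
                                  "labels": {"kubernetes.io/hostname": "node2.example.com",
                                             "color": "red"}}},
]

print(oo_merge_dicts({'a': 1, 'b': 2}, {'b': 3, 'c': 4}))   # {'a': 1, 'b': 3, 'c': 4}
print(oo_oc_nodes_matching_selector(nodes, 'color=green'))  # ['node1.example.com']
```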
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 23c40682f..fef844c09 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.75-1 ./ +3.0.79-1 ./ diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index cd67b69a5..f6cc2edde 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -57,6 +57,19 @@ class FilterModule(object): return [item for sublist in data for item in sublist] @staticmethod + def oo_merge_dicts(first_dict, second_dict): + """ Merge two dictionaries where second_dict values take precedence. + Ex: first_dict={'a': 1, 'b': 2} + second_dict={'b': 3, 'c': 4} + returns {'a': 1, 'b': 3, 'c': 4} + """ + if not isinstance(first_dict, dict) or not isinstance(second_dict, dict): + raise errors.AnsibleFilterError("|failed expects to merge two dicts") + merged = first_dict.copy() + merged.update(second_dict) + return merged + + @staticmethod def oo_collect(data, attribute=None, filters=None): """ This takes a list of dict and collects all attributes specified into a list. If filter is specified then we will include all items that @@ -286,6 +299,69 @@ class FilterModule(object): return [x for x in data if x.has_key(filter_attr) and x[filter_attr]] @staticmethod + def oo_oc_nodes_matching_selector(nodes, selector): + """ Filters a list of nodes by selector. + + Examples: + nodes = [{"kind": "Node", "metadata": {"name": "node1.example.com", + "labels": {"kubernetes.io/hostname": "node1.example.com", + "color": "green"}}}, + {"kind": "Node", "metadata": {"name": "node2.example.com", + "labels": {"kubernetes.io/hostname": "node2.example.com", + "color": "red"}}}] + selector = 'color=green' + returns = ['node1.example.com'] + Args: + nodes (list[dict]): list of node definitions + selector (str): "label=value" node selector to filter `nodes` by + Returns: + list[str]: nodes filtered by selector + """ + if not isinstance(nodes, list): + raise errors.AnsibleFilterError("failed expects nodes to be a list, got {0}".format(type(nodes))) + if not isinstance(selector, basestring): + raise errors.AnsibleFilterError("failed expects selector to be a string") + if not re.match('.*=.*', selector): + raise errors.AnsibleFilterError("failed selector does not match \"label=value\" format") + label = selector.split('=')[0] + value = selector.split('=')[1] + return FilterModule.oo_oc_nodes_with_label(nodes, label, value) + + @staticmethod + def oo_oc_nodes_with_label(nodes, label, value): + """ Filters a list of nodes by label, value. 
+ + Examples: + nodes = [{"kind": "Node", "metadata": {"name": "node1.example.com", + "labels": {"kubernetes.io/hostname": "node1.example.com", + "color": "green"}}}, + {"kind": "Node", "metadata": {"name": "node2.example.com", + "labels": {"kubernetes.io/hostname": "node2.example.com", + "color": "red"}}}] + label = 'color' + value = 'green' + returns = ['node1.example.com'] + Args: + nodes (list[dict]): list of node definitions + label (str): label to filter `nodes` by + value (str): value of `label` to filter `nodes` by + Returns: + list[str]: nodes filtered by selector + """ + if not isinstance(nodes, list): + raise errors.AnsibleFilterError("failed expects nodes to be a list") + if not isinstance(label, basestring): + raise errors.AnsibleFilterError("failed expects label to be a string") + if not isinstance(value, basestring): + raise errors.AnsibleFilterError("failed expects value to be a string") + matching_nodes = [] + for node in nodes: + if label in node['metadata']['labels']: + if node['metadata']['labels'][label] == value: + matching_nodes.append(node['metadata']['name']) + return matching_nodes + + @staticmethod def oo_nodes_with_label(nodes, label, value=None): """ Filters a list of nodes by label and value (if provided) @@ -588,36 +664,38 @@ class FilterModule(object): if persistent_volumes == None: persistent_volumes = [] - for component in hostvars['openshift']['hosted']: - kind = hostvars['openshift']['hosted'][component]['storage']['kind'] - create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] - if kind != None and create_pv: - if kind == 'nfs': - host = hostvars['openshift']['hosted'][component]['storage']['host'] - if host == None: - if len(groups['oo_nfs_to_config']) > 0: - host = groups['oo_nfs_to_config'][0] + if 'hosted' in hostvars['openshift']: + for component in hostvars['openshift']['hosted']: + if 'storage' in hostvars['openshift']['hosted'][component]: + kind = hostvars['openshift']['hosted'][component]['storage']['kind'] + create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] + if kind != None and create_pv: + if kind == 'nfs': + host = hostvars['openshift']['hosted'][component]['storage']['host'] + if host == None: + if len(groups['oo_nfs_to_config']) > 0: + host = groups['oo_nfs_to_config'][0] + else: + raise errors.AnsibleFilterError("|failed no storage host detected") + directory = hostvars['openshift']['hosted'][component]['storage']['nfs']['directory'] + volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] + path = directory + '/' + volume + size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] + access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] + persistent_volume = dict( + name="{0}-volume".format(volume), + capacity=size, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + persistent_volumes.append(persistent_volume) else: - raise errors.AnsibleFilterError("|failed no storage host detected") - directory = hostvars['openshift']['hosted'][component]['storage']['nfs']['directory'] - volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] - path = directory + '/' + volume - size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] - access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - access_modes=access_modes, - 
storage=dict( - nfs=dict( - server=host, - path=path))) - persistent_volumes.append(persistent_volume) - else: - msg = "|failed invalid storage kind '{0}' for component '{1}'".format( - kind, - component) - raise errors.AnsibleFilterError(msg) + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + component) + raise errors.AnsibleFilterError(msg) return persistent_volumes @staticmethod @@ -632,18 +710,20 @@ class FilterModule(object): if persistent_volume_claims == None: persistent_volume_claims = [] - for component in hostvars['openshift']['hosted']: - kind = hostvars['openshift']['hosted'][component]['storage']['kind'] - create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] - if kind != None and create_pv: - volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] - size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] - access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] - persistent_volume_claim = dict( - name="{0}-claim".format(volume), - capacity=size, - access_modes=access_modes) - persistent_volume_claims.append(persistent_volume_claim) + if 'hosted' in hostvars['openshift']: + for component in hostvars['openshift']['hosted']: + if 'storage' in hostvars['openshift']['hosted'][component]: + kind = hostvars['openshift']['hosted'][component]['storage']['kind'] + create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] + if kind != None and create_pv: + volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] + size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] + access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] + persistent_volume_claim = dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + persistent_volume_claims.append(persistent_volume_claim) return persistent_volume_claims @staticmethod @@ -711,7 +791,7 @@ class FilterModule(object): return retval @staticmethod - def oo_image_tag_to_rpm_version(version): + def oo_image_tag_to_rpm_version(version, include_dash=False): """ Convert an image tag string to an RPM version if necessary Empty strings and strings that are already in rpm version format are ignored. @@ -722,7 +802,10 @@ class FilterModule(object): raise errors.AnsibleFilterError("|failed expects a string or unicode") if version.startswith("v"): - version = "-" + version.replace("v", "") + version = version.replace("v", "") + + if include_dash: + version = "-" + version return version @@ -755,4 +838,7 @@ class FilterModule(object): "oo_pods_match_component": self.oo_pods_match_component, "oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars, "oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version, + "oo_merge_dicts": self.oo_merge_dicts, + "oo_oc_nodes_matching_selector": self.oo_oc_nodes_matching_selector, + "oo_oc_nodes_with_label": self.oo_oc_nodes_with_label } diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example index bedaffbdc..62891e6c3 100644 --- a/inventory/byo/hosts.aep.example +++ b/inventory/byo/hosts.aep.example @@ -177,9 +177,47 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # glusterfs plugin dependencies will be installed, if available. 
#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] -# default selectors for router and registry services -# openshift_router_selector='region=infra' -# openshift_registry_selector='region=infra' +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_router_selector='region=infra' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. +#openshift_hosted_router_replicas=2 +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. +#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key"} + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_registry_selector='region=infra' # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 3ecf898fb..1f13aade6 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -182,9 +182,47 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # glusterfs plugin dependencies will be installed, if available. #osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] -# default selectors for router and registry services -# openshift_router_selector='region=infra' -# openshift_registry_selector='region=infra' +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_router_selector='region=infra' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. +#openshift_hosted_router_replicas=2 +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. 
+#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key"} + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_registry_selector='region=infra' # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 126d44487..2386eb236 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -178,9 +178,47 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # glusterfs plugin dependencies will be installed, if available. #osn_storage_plugin_deps=['ceph','glusterfs'] -# default selectors for router and registry services -# openshift_router_selector='region=infra' -# openshift_registry_selector='region=infra' +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_router_selector='region=infra' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. +#openshift_hosted_router_replicas=2 +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. +#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key"} + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. 
+# Default value: 'region=infra' +#openshift_registry_selector='region=infra' # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 845455444..c3bd81d49 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.75 +Version: 3.0.79 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -279,6 +279,39 @@ Atomic OpenShift Utilities includes %changelog +* Tue Apr 12 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.79-1 +- Bug 1324728 - Ansible should not downgrade docker when installing 3.2 + containerized env (bleanhar@redhat.com) +- Fixing non-HA master restart conditional (bleanhar@redhat.com) +- Fetching the current version a little more carefully (bleanhar@redhat.com) +- Make sure Docker is restarted after we have correctly configured the + containerized systemd units (bleanhar@redhat.com) +- use RestartSec to avoid default rate limit in systemd (bleanhar@redhat.com) +- Convert image_tag on masters (smunilla@redhat.com) +- Installs and upgrades from authenticated registries are not supported for now + (bleanhar@redhat.com) +- Handle cases where the pacemaker variables aren't set (bleanhar@redhat.com) +- Containerized installs on RHEL were downgrading docker unnecessarily + (bleanhar@redhat.com) + +* Tue Apr 12 2016 Troy Dawson <tdawson@redhat.com> 3.0.78-1 +- Add support for creating secure router. (abutcher@redhat.com) + +* Mon Apr 11 2016 Troy Dawson <tdawson@redhat.com> 3.0.77-1 +- Fix a docker-storage sysconfig bug. (dgoodwin@redhat.com) +- update bootstrap-fedora to include python2-firewall for F24+ + (maxamillion@fedoraproject.org) +- Merge openshift_env hostvars. (abutcher@redhat.com) +- Add openshift_hosted_facts role and remove hosted facts from + openshift_common. (abutcher@redhat.com) + +* Fri Apr 08 2016 Troy Dawson <tdawson@redhat.com> 3.0.76-1 +- a-o-i: Support openshift_image_tag (smunilla@redhat.com) +- Bug 1324729 - Import xPaas image streams failed during 3.2 installation + (bleanhar@redhat.com) +- Test docker_version_result.stdout when determining if docker should be + installed/downgraded. 
(abutcher@redhat.com) + * Thu Apr 07 2016 Troy Dawson <tdawson@redhat.com> 3.0.75-1 - First attempt at oadm router module (kwoodson@redhat.com) - Remove openshift_common dep from openshift_storage_nfs (abutcher@redhat.com) diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml index 471c41f16..b380a74d6 100644 --- a/playbooks/adhoc/bootstrap-fedora.yml +++ b/playbooks/adhoc/bootstrap-fedora.yml @@ -1,4 +1,4 @@ - hosts: OSEv3 tasks: - name: install python and deps for ansible modules - raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python + raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python python2-firewall diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml index 1ac78468a..44bf962c9 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -49,8 +49,6 @@ openshift_serviceaccounts_namespace: default openshift_serviceaccounts_sccs: - privileged - - role: openshift_router - when: deploy_infra | bool - role: openshift_registry registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim" when: deploy_infra | bool and attach_registry_volume | bool diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 2411e7360..6f908fa7f 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -34,3 +34,5 @@ - include: additional_config.yml - include: ../openshift-node/config.yml + +- include: openshift_hosted.yml diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml new file mode 100644 index 000000000..1cbc0f544 --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -0,0 +1,5 @@ +- name: Create Hosted Resources + hosts: oo_first_master + roles: + - role: openshift_hosted + openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh index 7a1edf38f..96944a78b 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh +++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh @@ -3,19 +3,19 @@ # Here we don't really care if this is a master, api, controller or node image. # We just need to know the version of one of them. 
unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1) -installed_container_name=$(basename -s .service ${unit_file}) -installed=$(docker exec ${installed_container_name} openshift version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') if [ ${1} == "origin" ]; then image_name="openshift/origin" elif grep aep $unit_file 2>&1 > /dev/null; then - image_name="aep3/aep" + image_name="aep3/node" elif grep openshift3 $unit_file 2>&1 > /dev/null; then - image_name="openshift3/ose" + image_name="openshift3/node" fi +installed=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') + docker pull ${image_name} 2>&1 > /dev/null -available=$(docker run --rm ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') +available=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') echo "---" echo "curr_version: ${installed}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml index 6e6f31195..21480ba55 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml @@ -38,7 +38,7 @@ msg: > This upgrade does not support Pacemaker: https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html - when: openshift.master.cluster_method == 'pacemaker' + when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker' - fail: msg: > @@ -57,6 +57,11 @@ roles: - openshift_facts tasks: + - openshift_facts: + role: master + local_facts: + ha: "{{ groups.oo_masters_to_config | length > 1 }}" + - name: Ensure Master is running service: name: "{{ openshift.common.service_type }}-master" @@ -77,11 +82,6 @@ state: started enabled: yes when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool - post_tasks: - - openshift_facts: - role: master - local_facts: - ha: "{{ groups.oo_masters_to_config | length > 1 }}" - name: Verify upgrade can proceed hosts: oo_nodes_to_config @@ -105,8 +105,12 @@ - include: ../../../../../roles/openshift_master/handlers/main.yml - include: ../../../../../roles/openshift_node/handlers/main.yml roles: + # We want the cli role to evaluate so that the containerized oc/oadm wrappers + # are modified to use the correct image tag. However, this can trigger a + # docker restart if new configuration is laid down which would immediately + # pull the latest image and defeat the purpose of these tasks. 
- openshift_cli - tasks: + pre_tasks: - name: Clean package cache command: "{{ ansible_pkg_mgr }} clean all" when: not openshift.common.is_atomic | bool @@ -147,6 +151,10 @@ - fail: msg: Verifying the correct version was found + when: g_aos_versions.curr_version == "" + + - fail: + msg: Verifying the correct version was found when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version - include_vars: ../../../../../roles/openshift_master/vars/main.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml index 041ad5a9a..eb12fcabe 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml @@ -1,33 +1,4 @@ --- -# This is a workaround for authenticated registries -- name: Download new images - hosts: oo_nodes_to_config - roles: - - openshift_facts - tasks: - - name: Pull Images - command: > - docker pull {{ item }}:v{{ g_new_version }} - with_items: - - "{{ openshift.node.node_image }}" - - "{{ openshift.node.ovs_image }}" - - "{{ openshift.common.pod_image }}" - - "{{ openshift.common.router_image }}" - - "{{ openshift.common.registry_image }}" - - "{{ openshift.common.deployer_image }}" - -# This is a workaround for authenticated registries -- name: Download new images - hosts: oo_masters_to_config - roles: - - openshift_facts - tasks: - - name: Pull Images - command: > - docker pull {{ item }}:v{{ g_new_version }} - with_items: - - "{{ openshift.master.master_image }}" - ############################################################################### # The restart playbook should be run after this playbook completes. ############################################################################### @@ -40,6 +11,20 @@ - include: docker_upgrade.yml when: not openshift.common.is_atomic | bool +# The cli image is used by openshift_facts to determine the currently installed +# version. We need to explicitly pull the latest image to handle cases where +# the locally cached 'latest' tag is older the g_new_version. 
+- name: Download cli image + hosts: oo_masters_to_config:oo_nodes_to_config + roles: + - openshift_facts + tasks: + - name: Pull Images + command: > + docker pull {{ item }}:latest + with_items: + - "{{ openshift.common.cli_image }}" + ############################################################################### # Upgrade Masters ############################################################################### diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 2d207a1a9..89648b321 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -7,23 +7,27 @@ register: docker_version_result changed_when: false +- stat: path=/etc/sysconfig/docker-storage + register: docker_storage_check + +- name: Remove deferred deletion for downgrades from 1.9 + command: > + sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage + when: docker_storage_check.stat.exists | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<') + - name: Downgrade docker if necessary command: "{{ ansible_pkg_mgr }} downgrade -y docker-{{ docker_version }}" register: docker_downgrade_result when: not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'gt') - name: Install docker - action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version != '' else '' }} state=present" - when: not openshift.common.is_atomic | bool and not docker_downgrade_result | changed + action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present" + when: not openshift.common.is_atomic | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'lt') -- stat: path=/etc/sysconfig/docker - register: docker_check - when: docker_downgrade_result | changed - -- name: Remove deferred deletion for downgrades from 1.9 - command: > - sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage - when: docker_downgrade_result | changed and docker_check.stat.exists | bool and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<') +# We're getting ready to start docker. This is a workaround for cases where it +# seems a package install/upgrade/downgrade has rebooted docker and crashed it. 
+- name: Reset docker service state + command: systemctl reset-failed docker.service - name: enable and start the docker service service: diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index b8dbefa64..cf957ede8 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -11,6 +11,7 @@ ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/v ExecStop=/usr/bin/docker stop {{ etcd_service }} SyslogIdentifier=etcd_container Restart=always +RestartSec=5s [Install] WantedBy=docker.service diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml index 471fd686b..d7442924f 100644 --- a/roles/openshift_cloud_provider/tasks/main.yml +++ b/roles/openshift_cloud_provider/tasks/main.yml @@ -2,14 +2,12 @@ - name: Set cloud provider facts openshift_facts: role: cloudprovider - openshift_env: "{{ item | oo_openshift_env }}" + openshift_env: "{{ hostvars[inventory_hostname] + | oo_merge_dicts(hostvars) + | oo_openshift_env }}" openshift_env_structures: - 'openshift.cloudprovider.aws.*' - 'openshift.cloudprovider.openstack.*' - no_log: true - with_items: - - "{{ hostvars[inventory_hostname] }}" - - "{{ hostvars }}" - name: Create cloudprovider config dir file: diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index b6074ff64..eda43b9f8 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -33,15 +33,11 @@ # versions or image tags. openshift_common's usage requires that it be a RPM # version and openshift_cli expects it to be an image tag. - name: Install the base package for versioning - action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') | oo_image_tag_to_rpm_version }} state=present" + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present" when: not openshift.common.is_containerized | bool -# This invocation also updates the version facts which are necessary -# for setting the hostname below. -- name: openshift_facts +- name: Set version facts openshift_facts: - role: hosted - openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}" # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the # hostname by default. 
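The openshift_common task above now passes include_dash=True, so the same filter can produce either a bare RPM version or a "-version" suffix for a yum package name. A minimal sketch of that conversion follows; the service type and image tag values are examples, not taken from this diff.

```python
# Sketch of oo_image_tag_to_rpm_version after this change (illustrative).
def oo_image_tag_to_rpm_version(version, include_dash=False):
    """Strip a leading 'v' from an image tag; optionally prepend a dash for RPM names."""
    if version.startswith("v"):
        version = version[1:]            # the real filter uses version.replace("v", "")
    if include_dash:
        version = "-" + version
    return version


# How the task above composes a yum package name with include_dash=True.
service_type = "atomic-openshift"        # example value
openshift_version = "v3.2.0.20"          # example image tag
pkg = service_type + oo_image_tag_to_rpm_version(openshift_version, include_dash=True)
print(pkg)   # atomic-openshift-3.2.0.20
```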
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index eb762e33f..7ea359af1 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml @@ -46,10 +46,24 @@ register: common_version failed_when: false changed_when: false - when: not openshift.common.is_atomic | bool + when: not openshift.common.is_containerized | bool + +- set_fact: + l_common_version: "{{ openshift.common.image_tag | default('0.0', True) | oo_image_tag_to_rpm_version }}" + when: openshift.common.is_containerized | bool + +- set_fact: + l_common_version: "{{ common_version.stdout | default('0.0', True) }}" + when: not openshift.common.is_containerized | bool - name: Set docker version to be installed set_fact: docker_version: "{{ '1.8.2' }}" - when: " ( common_version.stdout | default('0.0', True) | version_compare('3.2','<') and openshift.common.service_type == 'atomic-openshift' ) or - ( common_version.stdout | default('0.0', True) | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )" + when: " ( l_common_version | version_compare('3.2','<') and openshift.common.service_type == 'atomic-openshift' ) or + ( l_common_version | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )" + +- name: Set docker version to be installed + set_fact: + docker_version: "{{ '1.9.1' }}" + when: " ( l_common_version | version_compare('3.2','>') and openshift.common.service_type == 'atomic-openshift' ) or + ( l_common_version | version_compare('1.2','>') and openshift.common.service_type == 'origin' )" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 4313790bf..6de2c1496 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -63,7 +63,16 @@ def migrate_local_facts(facts): migrated_facts = copy.deepcopy(facts) return migrate_docker_facts(migrated_facts) - +def migrate_hosted_facts(facts): + """ Apply migrations for master facts """ + if 'master' in facts: + if 'router_selector' in facts['master']: + if 'hosted' not in facts: + facts['hosted'] = {} + if 'router' not in facts['hosted']: + facts['hosted']['router'] = {} + facts['hosted']['router']['selector'] = facts['master'].pop('router_selector') + return facts def first_ip(network): """ Return the first IPv4 address in network @@ -394,7 +403,7 @@ def set_node_schedulability(facts): facts['node']['schedulable'] = True return facts -def set_master_selectors(facts): +def set_selectors(facts): """ Set selectors facts if not already present in facts dict Args: facts (dict): existing facts @@ -403,16 +412,21 @@ def set_master_selectors(facts): facts if they were not already present """ + deployment_type = facts['common']['deployment_type'] + if deployment_type == 'online': + selector = "type=infra" + else: + selector = "region=infra" + + if 'hosted' not in facts: + facts['hosted'] = {} + if 'router' not in facts['hosted']: + facts['hosted']['router'] = {} + if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']: + facts['hosted']['router']['selector'] = selector + if 'master' in facts: if 'infra_nodes' in facts['master']: - deployment_type = facts['common']['deployment_type'] - if deployment_type == 'online': - selector = "type=infra" - else: - selector = "region=infra" - - if 'router_selector' not in facts['master']: - facts['master']['router_selector'] = selector if 
'registry_selector' not in facts['master']: facts['master']['registry_selector'] = selector return facts @@ -1066,13 +1080,9 @@ def get_openshift_version(facts, cli_image=None): elif 'node' in facts: container = facts['common']['service_type'] + '-node' - if container is not None: - exit_code, output, _ = module.run_command(['docker', 'exec', container, 'openshift', 'version']) - # if for some reason the container is installed but not running - # we'll fall back to using docker run later in this method. - if exit_code == 0: - version = parse_openshift_version(output) - + # Try to get the version fromthe available cli image _before_ resorting + # to exec'ing in to the running container. This is to be more fault + # tolerant in environments where the container is not running. if version is None and cli_image is not None: # Assume we haven't installed the environment yet and we need # to query the latest image, but only if docker is installed @@ -1080,6 +1090,13 @@ def get_openshift_version(facts, cli_image=None): exit_code, output, _ = module.run_command(['docker', 'run', '--rm', cli_image, 'version']) version = parse_openshift_version(output) + if version is None and container is not None: + exit_code, output, _ = module.run_command(['docker', 'exec', container, 'openshift', 'version']) + # if for some reason the container is installed but not running + # we'll fall back to using docker run later in this method. + if exit_code == 0: + version = parse_openshift_version(output) + return version def parse_openshift_version(output): @@ -1343,7 +1360,7 @@ def set_container_facts_if_unset(facts): facts['common']['admin_binary'] = '/usr/local/bin/oadm' facts['common']['client_binary'] = '/usr/local/bin/oc' openshift_version = get_openshift_version(facts, cli_image) - if openshift_version is not None: + if openshift_version is not None and openshift_version is not "": base_version = openshift_version.split('-')[0] facts['common']['image_tag'] = "v" + base_version @@ -1485,7 +1502,7 @@ class OpenShiftFacts(object): facts = set_flannel_facts_if_unset(facts) facts = set_nuage_facts_if_unset(facts) facts = set_node_schedulability(facts) - facts = set_master_selectors(facts) + facts = set_selectors(facts) facts = set_metrics_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) facts = set_sdn_facts_if_unset(facts, self.system_facts) @@ -1579,23 +1596,25 @@ class OpenShiftFacts(object): if 'cloudprovider' in roles: defaults['cloudprovider'] = dict(kind=None) - defaults['hosted'] = dict( - registry=dict( - storage=dict( - kind=None, - volume=dict( - name='registry', - size='5Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)'), - host=None, - access_modes=['ReadWriteMany'], - create_pv=True - ) + if 'hosted' in roles or self.role == 'hosted': + defaults['hosted'] = dict( + registry=dict( + storage=dict( + kind=None, + volume=dict( + name='registry', + size='5Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)'), + host=None, + access_modes=['ReadWriteMany'], + create_pv=True + ) + ), + router=dict() ) - ) return defaults diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md new file mode 100644 index 000000000..633ec0937 --- /dev/null +++ b/roles/openshift_hosted/README.md @@ -0,0 +1,55 @@ +OpenShift Hosted +================ + +OpenShift Hosted Resources + +* OpenShift Router + +Requirements +------------ + +This role requires a running OpenShift cluster with nodes labeled to +match the openshift_hosted_router_selector 
(default: region=infra). + +Role Variables +-------------- + +From this role: + +| Name | Default value | Description | +|-------------------------------------|------------------------------------------|----------------------------------------------------------------------------------------------------------------------| +| openshift_hosted_router_certificate | None | Dictionary containing "certfile" and "keyfile" keys with values containing paths to local certificate files. | +| openshift_hosted_router_registryurl | 'openshift3/ose-${component}:${version}' | The image to base the OpenShift router on. | +| openshift_hosted_router_replicas | Number of nodes matching selector | The number of replicas to configure. | +| openshift_hosted_router_selector | region=infra | Node selector used when creating router. The OpenShift router will only be deployed to nodes matching this selector. | + +Dependencies +------------ + +* openshift_common +* openshift_hosted_facts + +Example Playbook +---------------- + +``` +- name: Create hosted resources + hosts: oo_first_master + roles: + - role: openshift_hosted + openshift_hosted_router_certificate: + certfile: /path/to/my-router.crt + keyfile: /path/to/my-router.key + openshift_hosted_router_registryurl: 'registry.access.redhat.com/openshift3/ose-haproxy-router:v3.0.2.0' + openshift_hosted_router_selector: 'type=infra' +``` + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Red Hat openshift@redhat.com diff --git a/roles/openshift_router/handlers/main.yml b/roles/openshift_hosted/handlers/main.yml index e69de29bb..e69de29bb 100644 --- a/roles/openshift_router/handlers/main.yml +++ b/roles/openshift_hosted/handlers/main.yml diff --git a/roles/openshift_router/meta/main.yml b/roles/openshift_hosted/meta/main.yml index c2b0777b5..75dfc24c3 100644 --- a/roles/openshift_router/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -11,5 +11,6 @@ galaxy_info: - 7 categories: - cloud - dependencies: - - openshift_facts +dependencies: +- openshift_common +- openshift_hosted_facts diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml new file mode 100644 index 000000000..d42a4e365 --- /dev/null +++ b/roles/openshift_hosted/tasks/main.yml @@ -0,0 +1,3 @@ +--- + +- include: router.yml diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml new file mode 100644 index 000000000..6a36f74b2 --- /dev/null +++ b/roles/openshift_hosted/tasks/router.yml @@ -0,0 +1,64 @@ +--- +- fail: + msg: "Both 'certfile' and 'keyfile' keys must be specified when supplying the openshift_hosted_router_certificate variable." 
+ when: openshift_hosted_router_certificate is defined and ('certfile' not in openshift_hosted_router_certificate or 'keyfile' not in openshift_hosted_router_certificate) + +- name: Read router certificate and key + slurp: + src: "{{ item }}" + register: openshift_router_certificate_output + with_items: + - "{{ openshift_hosted_router_certificate.certfile }}" + - "{{ openshift_hosted_router_certificate.keyfile }}" + delegate_to: localhost + when: openshift_hosted_router_certificate is defined + +- name: Persist certificate contents + openshift_facts: + role: hosted + openshift_env: + openshift_hosted_router_certificate_contents: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}" + when: openshift_hosted_router_certificate is defined + +- name: Create PEM certificate + copy: + content: "{{ openshift.hosted.router.certificate.contents }}" + dest: "{{ openshift_master_config_dir }}/openshift-router.pem" + mode: 0600 + when: openshift.hosted.router.certificate | default(None) != None + +- name: Retrieve list of openshift nodes + command: > + {{ openshift.common.client_binary }} --api-version='v1' -o json + get nodes -n default --config={{ openshift.common.config_base }}/master/admin.kubeconfig + register: openshift_hosted_router_nodes_json + when: openshift.hosted.router.replicas | default(None) == None + +- name: Collect nodes matching router selector + set_fact: + openshift_hosted_router_nodes: > + {{ (openshift_hosted_router_nodes_json.stdout|from_json)['items'] + | oo_oc_nodes_matching_selector(openshift.hosted.router.selector) }} + when: openshift.hosted.router.replicas | default(None) == None + +- name: Create OpenShift router + command: > + {{ openshift.common.admin_binary }} router --create + {% if openshift.hosted.router.replicas | default(None) != None -%} + --replicas={{ openshift.hosted.router.replicas }} + {% else -%} + --replicas={{ openshift_hosted_router_nodes | length }} + {% endif %} + {% if openshift.hosted.router.certificate | default(None) != None -%} + --default-cert={{ openshift_master_config_dir }}/openshift-router.pem + {% endif -%} + --namespace=default + --service-account=router + --selector='{{ openshift.hosted.router.selector }}' + --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig + {% if openshift.hosted.router.registryurl | default(None)!= None -%} + --images='{{ openshift.hosted.router.registryurl }}' + {% endif -%} + register: openshift_hosted_router_results + changed_when: "'service exists' not in openshift_hosted_router_results.stdout" + when: openshift.hosted.router.replicas | default(None) != None or (openshift_hosted_router_nodes is defined and openshift_hosted_router_nodes | length > 0) diff --git a/roles/openshift_hosted/vars/main.yml b/roles/openshift_hosted/vars/main.yml new file mode 100644 index 000000000..9967e26f4 --- /dev/null +++ b/roles/openshift_hosted/vars/main.yml @@ -0,0 +1,2 @@ +--- +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" diff --git a/roles/openshift_hosted_facts/meta/main.yml b/roles/openshift_hosted_facts/meta/main.yml new file mode 100644 index 000000000..dd2de07bc --- /dev/null +++ b/roles/openshift_hosted_facts/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Andrew Butcher + description: OpenShift Hosted Facts + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- role: openshift_facts diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml new file mode 100644 index 000000000..f595e1e81 --- /dev/null +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- name: Set hosted facts + openshift_facts: + role: hosted + openshift_env: "{{ hostvars[inventory_hostname] + | oo_merge_dicts(hostvars) + | oo_openshift_env }}" diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 70c7ef4e4..f7dfb11f7 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -1,17 +1,17 @@ --- - name: restart master service: name={{ openshift.common.service_type }}-master state=restarted - when: (openshift.master.ha is defined and not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) + when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) notify: Verify API Server - name: restart master api service: name={{ openshift.common.service_type }}-master-api state=restarted - when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' notify: Verify API Server - name: restart master controllers service: name={{ openshift.common.service_type }}-master-controllers state=restarted - when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index 7ab913eea..0d4241e2c 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -16,3 +16,4 @@ dependencies: - role: openshift_cli - role: openshift_cloud_provider - role: openshift_master_facts +- role: openshift_hosted_facts diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 23994cdcf..18a42bf93 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -24,7 +24,7 @@ when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and openshift.common.is_containerized | bool - name: Install Master package - action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=present" + action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present" when: not openshift.common.is_containerized | bool - name: Pull master image diff --git 
a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index 66d76978e..df1dbb85e 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -20,6 +20,7 @@ LimitCORE=infinity WorkingDirectory={{ openshift.common.data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-api Restart=always +RestartSec=5s [Install] WantedBy=docker.service diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 75759c133..5ff2edae4 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -19,6 +19,7 @@ LimitCORE=infinity WorkingDirectory={{ openshift.common.data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-controllers Restart=on-failure +RestartSec=5s [Install] WantedBy=docker.service diff --git a/roles/openshift_master/templates/docker/master.docker.service.j2 b/roles/openshift_master/templates/docker/master.docker.service.j2 index d02fc5342..97f698b68 100644 --- a/roles/openshift_master/templates/docker/master.docker.service.j2 +++ b/roles/openshift_master/templates/docker/master.docker.service.j2 @@ -12,6 +12,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.c ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master Restart=always +RestartSec=5s [Install] WantedBy=docker.service diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 index e61418eb6..1f50fdce1 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 @@ -15,6 +15,7 @@ LimitNOFILE=131072 LimitCORE=infinity WorkingDirectory={{ openshift.common.data_dir }} SyslogIdentifier=atomic-openshift-master-api +RestartSec=5s [Install] WantedBy=multi-user.target diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index ba4c1e0e8..bf62696f0 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 @@ -20,6 +20,7 @@ LimitCORE=infinity WorkingDirectory={{ openshift.common.data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-controllers Restart=on-failure +RestartSec=5s [Install] WantedBy=multi-user.target diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index ff5a97fe0..30f09b250 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -17,6 +17,7 @@ ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ 
openshift.common.service_type }}-node SyslogIdentifier={{ openshift.common.service_type }}-node Restart=always +RestartSec=5s [Install] WantedBy=docker.service diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service index 8052a3a39..1e1f8967d 100644 --- a/roles/openshift_node/templates/openvswitch.docker.service +++ b/roles/openshift_node/templates/openvswitch.docker.service @@ -11,6 +11,7 @@ ExecStartPost=/usr/bin/sleep 5 ExecStop=/usr/bin/docker stop openvswitch SyslogIdentifier=openvswitch Restart=always +RestartSec=5s [Install] WantedBy=docker.service diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml index d9f6fc01a..25e5a38dd 100644 --- a/roles/openshift_persistent_volumes/meta/main.yml +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -10,4 +10,5 @@ galaxy_info: versions: - 7 dependencies: -- { role: openshift_common } +- role: openshift_common +- role: openshift_hosted_facts diff --git a/roles/openshift_registry/meta/main.yml b/roles/openshift_registry/meta/main.yml index b220a020e..e6db8c537 100644 --- a/roles/openshift_registry/meta/main.yml +++ b/roles/openshift_registry/meta/main.yml @@ -11,5 +11,5 @@ galaxy_info: - 7 categories: - cloud - dependencies: - - openshift_facts +dependencies: +- role: openshift_hosted_facts diff --git a/roles/openshift_router/README.md b/roles/openshift_router/README.md deleted file mode 100644 index d490e1038..000000000 --- a/roles/openshift_router/README.md +++ /dev/null @@ -1,35 +0,0 @@ -OpenShift Container Router -========================== - -OpenShift Router service installation - -Requirements ------------- - -Running OpenShift cluster - -Role Variables --------------- - -From this role: -| Name | Default value | | -|--------------------|-------------------------------------------------------|---------------------| -| | | | - -Dependencies ------------- - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License, Version 2.0 - -Author Information ------------------- - -Red Hat openshift@redhat.com diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml deleted file mode 100644 index 40365d04d..000000000 --- a/roles/openshift_router/tasks/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: Deploy OpenShift Router - command: > - {{ openshift.common.admin_binary }} router - --create --replicas={{ openshift.master.infra_nodes | length }} - --namespace=default - --service-account=router {{ ortr_selector }} - --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ ortr_images }} - register: ortr_results - changed_when: "'service exists' not in ortr_results.stdout" diff --git a/roles/openshift_router/vars/main.yml b/roles/openshift_router/vars/main.yml deleted file mode 100644 index bcac12068..000000000 --- a/roles/openshift_router/vars/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -openshift_master_config_dir: "{{ openshift.common.config_base }}/master" -ortr_images: "--images='{{ openshift.master.registry_url }}'" -ortr_selector: "--selector='{{ openshift.master.router_selector }}'" diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml index c6815d697..d675e0750 100644 --- a/roles/openshift_storage_nfs/meta/main.yml +++ b/roles/openshift_storage_nfs/meta/main.yml @@ -10,6 +10,6 @@ galaxy_info: versions: - 7 dependencies: -- { role: os_firewall } -- { role: openshift_facts } -- { 
role: openshift_repos } diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 28b157e8e..cf2ca51ca 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -130,7 +130,9 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy): if CFG.settings.get('master_routingconfig_subdomain', False): base_inventory.write( "openshift_master_default_subdomain={}\n".format(CFG.settings['master_routingconfig_subdomain'])) - + if CFG.settings.get('variant_version', None) == '3.1': + #base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version'))) + base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6')) def write_host(host, inventory, schedulable=None):
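For reference, the replica logic introduced in roles/openshift_hosted/tasks/router.yml earlier in this diff (when openshift.hosted.router.replicas is unset, count the nodes returned by `oc get nodes -o json` that match openshift.hosted.router.selector) can be approximated in plain Python as below. The function name and node data are hypothetical; only the selection rule comes from the tasks.

```python
import json

# Approximation of the router replica calculation (illustrative; names are hypothetical).
def router_replicas(nodes_json, selector, explicit_replicas=None):
    """Use the explicit replica count if given; otherwise count nodes matching the selector."""
    if explicit_replicas is not None:
        return explicit_replicas
    label = selector.split('=')[0]
    value = selector.split('=')[1]
    items = json.loads(nodes_json)['items']
    matching = [node['metadata']['name'] for node in items
                if node['metadata']['labels'].get(label) == value]
    return len(matching)


# Input shaped like `oc get nodes -o json`, with made-up node names.
nodes_json = json.dumps({"items": [
    {"metadata": {"name": "infra1.example.com", "labels": {"region": "infra"}}},
    {"metadata": {"name": "node1.example.com", "labels": {"region": "primary"}}},
]})
print(router_replicas(nodes_json, "region=infra"))   # 1
```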
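Likewise, the openshift_docker_facts tasks earlier in the diff pick a docker pin from the detected OpenShift version: 1.8.2 below 3.2 (enterprise) or 1.1.4 (origin), and 1.9.1 above 3.2 or 1.2. A hedged sketch of that decision table, with a numeric-only version parser standing in for Ansible's version_compare:

```python
# Sketch of the docker_version pinning added to openshift_docker_facts (illustrative).
def _ver(version_string):
    """Parse a dotted, numeric-only version into a comparable tuple (stand-in for version_compare)."""
    return tuple(int(part) for part in version_string.split('.'))


def docker_version_for(common_version, service_type):
    """Return '1.8.2' below the 3.2 / 1.1.4 cut-off, '1.9.1' above 3.2 / 1.2, else None.
    Versions sitting exactly on a boundary match neither task, as in the YAML above."""
    detected = _ver(common_version or '0.0')
    if service_type == 'atomic-openshift':
        low, high = _ver('3.2'), _ver('3.2')
    else:                       # assume 'origin'
        low, high = _ver('1.1.4'), _ver('1.2')
    if detected < low:
        return '1.8.2'
    if detected > high:
        return '1.9.1'
    return None


print(docker_version_for('3.1.1.6', 'atomic-openshift'))   # 1.8.2
print(docker_version_for('3.3.0', 'atomic-openshift'))     # 1.9.1
```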