165 files changed, 644 insertions, 1839 deletions
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index f9564499d..45795d097 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -710,229 +710,6 @@ def oo_openshift_env(hostvars): return facts -# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements, too-many-locals -def oo_component_persistent_volumes(hostvars, groups, component, subcomponent=None): - """ Generate list of persistent volumes based on oo_openshift_env - storage options set in host variables for a specific component. - """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - if not issubclass(type(groups), dict): - raise errors.AnsibleFilterError("|failed expects groups is a dict") - - persistent_volume = None - - if component in hostvars['openshift']: - if subcomponent is not None: - storage_component = hostvars['openshift'][component][subcomponent] - else: - storage_component = hostvars['openshift'][component] - - if 'storage' in storage_component: - params = storage_component['storage'] - kind = params['kind'] - if 'create_pv' in params: - create_pv = params['create_pv'] - if kind is not None and create_pv: - if kind == 'nfs': - host = params['host'] - if host is None: - if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: - host = groups['oo_nfs_to_config'][0] - else: - raise errors.AnsibleFilterError("|failed no storage host detected") - directory = params['nfs']['directory'] - volume = params['volume']['name'] - path = directory + '/' + volume - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - nfs=dict( - server=host, - path=path))) - - elif kind == 'openstack': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - filesystem = params['openstack']['filesystem'] - volume_id = params['openstack']['volumeID'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - cinder=dict( - fsType=filesystem, - volumeID=volume_id))) - - elif kind == 'glusterfs': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - endpoints = params['glusterfs']['endpoints'] - path = params['glusterfs']['path'] - read_only = params['glusterfs']['readOnly'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - glusterfs=dict( - endpoints=endpoints, - path=path, - readOnly=read_only))) - - elif not (kind == 'object' or kind == 'dynamic'): - msg = "|failed invalid storage kind '{0}' for component '{1}'".format( - kind, - component) - raise errors.AnsibleFilterError(msg) - return persistent_volume - - -# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements -def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): - """ Generate list of persistent volumes based on oo_openshift_env - storage options set in host variables. 
- """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - if not issubclass(type(groups), dict): - raise errors.AnsibleFilterError("|failed expects groups is a dict") - if persistent_volumes is not None and not issubclass(type(persistent_volumes), list): - raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list") - - if persistent_volumes is None: - persistent_volumes = [] - if 'hosted' in hostvars['openshift']: - for component in hostvars['openshift']['hosted']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'hosted', component) - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - - if 'logging' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'loggingops' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'metrics' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'prometheus' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'alertmanager' in hostvars['openshift']['prometheus']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertmanager') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'alertbuffer' in hostvars['openshift']['prometheus']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertbuffer') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - return persistent_volumes - - -def oo_component_pv_claims(hostvars, component, subcomponent=None): - """ Generate list of persistent volume claims based on oo_openshift_env - storage options set in host variables for a speicific component. - """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - - if component in hostvars['openshift']: - if subcomponent is not None: - storage_component = hostvars['openshift'][component][subcomponent] - else: - storage_component = hostvars['openshift'][component] - - if 'storage' in storage_component: - params = storage_component['storage'] - kind = params['kind'] - if 'create_pv' in params: - if 'create_pvc' in params: - create_pv = params['create_pv'] - create_pvc = params['create_pvc'] - if kind not in [None, 'object'] and create_pv and create_pvc: - volume = params['volume']['name'] - size = params['volume']['size'] - access_modes = params['access']['modes'] - persistent_volume_claim = dict( - name="{0}-claim".format(volume), - capacity=size, - access_modes=access_modes) - return persistent_volume_claim - return None - - -def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): - """ Generate list of persistent volume claims based on oo_openshift_env - storage options set in host variables. 
- """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list): - raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list") - - if persistent_volume_claims is None: - persistent_volume_claims = [] - if 'hosted' in hostvars['openshift']: - for component in hostvars['openshift']['hosted']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'hosted', component) - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - - if 'logging' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'loggingops' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'metrics' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'prometheus' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'alertmanager' in hostvars['openshift']['prometheus']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertmanager') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'alertbuffer' in hostvars['openshift']['prometheus']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertbuffer') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - return persistent_volume_claims - - def oo_31_rpm_rename_conversion(rpms, openshift_version=None): """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms names with proper version (if provided) @@ -1220,8 +997,6 @@ class FilterModule(object): "oo_generate_secret": oo_generate_secret, "oo_nodes_with_label": oo_nodes_with_label, "oo_openshift_env": oo_openshift_env, - "oo_persistent_volumes": oo_persistent_volumes, - "oo_persistent_volume_claims": oo_persistent_volume_claims, "oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion, "oo_pods_match_component": oo_pods_match_component, "oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars, diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 3a9944ba4..e3b56d7a1 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -298,24 +298,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Set cockpit plugins #osm_cockpit_plugins=['cockpit-kubernetes'] -# Native high availability cluster method with optional load balancer. +# Native high availability (default cluster method) # If no lb group is defined, the installer assumes that a load balancer has # been preconfigured. For installation the value of # openshift_master_cluster_hostname must resolve to the load balancer # or to one or all of the masters defined in the inventory if no load # balancer is present. 
-#openshift_master_cluster_method=native -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com - -# Pacemaker high availability cluster method. -# Pacemaker HA environment must be able to self provision the -# configured VIP. For installation openshift_master_cluster_hostname -# must resolve to the configured VIP. -#openshift_master_cluster_method=pacemaker -#openshift_master_cluster_password=openshift_cluster -#openshift_master_cluster_vip=192.168.133.25 -#openshift_master_cluster_public_vip=192.168.133.25 #openshift_master_cluster_hostname=openshift-ansible.test.example.com #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml index 44a2ef534..69b2541bb 100644 --- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml +++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml @@ -8,7 +8,7 @@ hosts: masters:!masters[0] pre_tasks: - set_fact: - openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" + openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}" tasks: - include_role: name: openshift_logging diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 0c2a2c7e8..ed7a7bd1a 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -62,7 +62,6 @@ - origin-master - origin-master-api - origin-master-controllers - - pcsd failed_when: false - hosts: etcd @@ -384,8 +383,6 @@ - origin-excluder - origin-docker-excluder - origin-master - - pacemaker - - pcs register: result until: result | success @@ -456,8 +453,6 @@ - /etc/sysconfig/origin-master-api - /etc/sysconfig/origin-master-controllers - /usr/share/openshift/examples - - /var/lib/pacemaker - - /var/lib/pcsd - /usr/lib/systemd/system/atomic-openshift-master-api.service - /usr/lib/systemd/system/atomic-openshift-master-controllers.service - /usr/lib/systemd/system/origin-master-api.service diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml index da7ec9d21..1dabae357 100644 --- a/playbooks/aws/openshift-cluster/hosted.yml +++ b/playbooks/aws/openshift-cluster/hosted.yml @@ -4,7 +4,7 @@ - include: ../../openshift-metrics/private/config.yml when: openshift_metrics_install_metrics | default(false) | bool -- include: ../../common/openshift-cluster/openshift_logging.yml +- include: ../../openshift-logging/private/config.yml when: openshift_logging_install_logging | default(false) | bool - include: ../../openshift-prometheus/private/config.yml diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml index 74e186f33..76bd47c4f 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/byo/openshift-cluster/openshift-logging.yml @@ -1,9 +1,3 @@ --- -# -# This playbook is a preview of upcoming changes for installing -# Hosted logging on. See inventory/byo/hosts.*.example for the -# currently supported method. -# -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/openshift_logging.yml +# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated.
+- include: ../../openshift-logging/config.yml diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 2eeb81b86..a8ca5e686 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -23,7 +23,7 @@ - include: ../../openshift-metrics/private/config.yml when: openshift_metrics_install_metrics | default(false) | bool -- include: openshift_logging.yml +- include: ../../openshift-logging/private/config.yml when: openshift_logging_install_logging | default(false) | bool - include: ../../openshift-prometheus/private/config.yml diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml index 83f16ac0d..3b779becb 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml @@ -6,10 +6,6 @@ retries: 3 delay: 30 -- name: Update docker facts - openshift_facts: - role: docker - - name: Restart containerized services service: name={{ item }} state=started with_items: diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 446f315d6..84b740227 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -6,7 +6,7 @@ - name: Update oreg_auth docker login credentials if necessary include_role: - name: docker + name: container_runtime tasks_from: registry_auth.yml when: oreg_auth_user is defined diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index d7a52707c..503d75ba0 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -143,10 +143,6 @@ roles: - { role: openshift_cli } vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. - skip_docker_role: True __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml" tasks: - name: Reconcile Cluster Roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 6cb6a665f..5f9c56867 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -73,12 +73,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 8f48bedcc..1aac3d014 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -77,12 +77,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index f25cfe0d0..306b76422 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index 2b99568c7..6d4949542 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -77,12 +77,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index d3d2046e6..0a592896b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -81,12 +81,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index c0546bd2d..b381d606a 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index b602cdd0e..e7d7756d1 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -77,12 +77,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index da81e6dea..be362e3ff 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -81,12 +81,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index abd56e762..6e68116b0 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml index 1166ac538..d41f365dc 100644 --- a/playbooks/init/facts.yml +++ b/playbooks/init/facts.yml @@ -135,11 +135,13 @@ - openshift_http_proxy is defined or openshift_https_proxy is defined - openshift_generate_no_proxy_hosts | default(True) | bool + - name: Initialize openshift.node.sdn_mtu + openshift_facts: + role: node + local_facts: + sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" + - name: initialize_facts set_fact repoquery command set_fact: repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}" - - - name: initialize_facts set_fact on openshift_docker_hosted_registry_network - set_fact: - openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml index 1d4f41ffc..5a7483b72 100644 --- a/playbooks/init/main.yml +++ b/playbooks/init/main.yml @@ -24,6 +24,7 @@ - import_playbook: repos.yml - import_playbook: version.yml + when: not (skip_version | default(False)) - name: Initialization Checkpoint End hosts: all diff --git a/playbooks/openshift-glusterfs/private/registry.yml b/playbooks/openshift-glusterfs/private/registry.yml index 75c1f0300..917b729f9 100644 --- a/playbooks/openshift-glusterfs/private/registry.yml +++ b/playbooks/openshift-glusterfs/private/registry.yml @@ -1,40 +1,11 @@ --- - import_playbook: config.yml -- name: Initialize GlusterFS registry PV and PVC vars - hosts: oo_first_master - tags: hosted - tasks: - - set_fact: - glusterfs_pv: [] - glusterfs_pvc: [] - - - set_fact: - glusterfs_pv: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - storage: - glusterfs: - endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" - path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" - readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" - glusterfs_pvc: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - when: openshift.hosted.registry.storage.glusterfs.swap - - name: Create persistent volumes hosts: oo_first_master - tags: - - hosted - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + when: openshift_hosted_registry_storage_glusterfs_swap | default(False) - name: Create Hosted Resources hosts: oo_first_master diff --git a/playbooks/openshift-hosted/private/cockpit-ui.yml b/playbooks/openshift-hosted/private/cockpit-ui.yml index 359132dd0..d6529425b 100644 ---
a/playbooks/openshift-hosted/private/cockpit-ui.yml +++ b/playbooks/openshift-hosted/private/cockpit-ui.yml @@ -5,4 +5,4 @@ - role: cockpit-ui when: - openshift_hosted_manage_registry | default(true) | bool - - not openshift.docker.hosted_registry_insecure | default(false) | bool + - not (openshift_docker_hosted_registry_insecure | default(false)) | bool diff --git a/playbooks/openshift-hosted/private/create_persistent_volumes.yml b/playbooks/openshift-hosted/private/create_persistent_volumes.yml index 8a60a30b8..41ae2eb69 100644 --- a/playbooks/openshift-hosted/private/create_persistent_volumes.yml +++ b/playbooks/openshift-hosted/private/create_persistent_volumes.yml @@ -1,9 +1,5 @@ --- - name: Create Hosted Resources - persistent volumes hosts: oo_first_master - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml index d737b836b..78fe663db 100644 --- a/playbooks/openshift-loadbalancer/private/config.yml +++ b/playbooks/openshift-loadbalancer/private/config.yml @@ -11,14 +11,12 @@ status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" -- name: Configure firewall and docker for load balancers +- name: Configure firewall for load balancers hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config vars: openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" roles: - role: os_firewall - - role: openshift_docker - when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool - name: Configure load balancers hosts: oo_lb_to_config diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml new file mode 100644 index 000000000..8837a2d32 --- /dev/null +++ b/playbooks/openshift-logging/config.yml @@ -0,0 +1,9 @@ +--- +# +# This playbook is a preview of upcoming changes for installing +# hosted logging. See inventory/byo/hosts.*.example for the +# currently supported method. +# +- include: ../init/main.yml + +- include: private/config.yml diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/openshift-logging/private/config.yml index bc59bd95a..bc59bd95a 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/openshift-logging/private/config.yml diff --git a/playbooks/openshift-logging/private/library b/playbooks/openshift-logging/private/library new file mode 120000 index 000000000..ba40d2f56 --- /dev/null +++ b/playbooks/openshift-logging/private/library @@ -0,0 +1 @@ +../../../library
\ No newline at end of file diff --git a/playbooks/openshift-logging/private/roles b/playbooks/openshift-logging/private/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/openshift-logging/private/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml index b7cfbe4e4..a90cd6b22 100644 --- a/playbooks/openshift-master/private/additional_config.yml +++ b/playbooks/openshift-master/private/additional_config.yml @@ -19,8 +19,6 @@ openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" roles: - - role: openshift_master_cluster - when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - role: openshift_project_request_template when: openshift_project_request_template_manage - role: openshift_examples diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml index 97acc5d5d..ecf8f15d9 100644 --- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml +++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml @@ -183,7 +183,6 @@ systemd: name={{ openshift.common.service_type }}-master-api state=restarted when: - yedit_output.changed - - openshift.master.cluster_method == 'native' # We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers @@ -194,7 +193,6 @@ until: result.rc == 0 when: - yedit_output.changed - - openshift.master.cluster_method == 'native' - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml index 5dbb21502..1077d0b9c 100644 --- a/playbooks/openshift-master/private/validate_restart.yml +++ b/playbooks/openshift-master/private/validate_restart.yml @@ -14,9 +14,6 @@ - role: common local_facts: rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" - - role: master - local_facts: - cluster_method: "{{ openshift_master_cluster_method | default(None) }}" # Creating a temp file on localhost, we then check each system that will # be rebooted to see if that file exists, if so we know we're running diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml index 06f3df9fa..dc5d7a57e 100644 --- a/playbooks/openshift-node/private/configure_nodes.yml +++ b/playbooks/openshift-node/private/configure_nodes.yml @@ -4,7 +4,6 @@ vars: openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config'] | default([]))) diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml index 3c3ac3646..5afa83be7 100644 --- a/playbooks/openshift-node/private/containerized_nodes.yml +++ b/playbooks/openshift-node/private/containerized_nodes.yml @@ -5,7 +5,6 @@ vars: openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ 
hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config'] | default([]))) diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml index c3beb59b7..41eb00f99 100644 --- a/playbooks/openshift-node/private/restart.yml +++ b/playbooks/openshift-node/private/restart.yml @@ -16,10 +16,6 @@ retries: 3 delay: 30 - - name: Update docker facts - openshift_facts: - role: docker - - name: Restart containerized services service: name: "{{ item }}" diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index c762169eb..f567242cd 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -24,7 +24,7 @@ The OpenStack release must be Newton (for Red Hat OpenStack this is version 10) or newer. It must also satisfy these requirements: * Heat (Orchestration) must be available -* The deployment image (CentOS 7 or RHEL 7) must be loaded +* The deployment image (CentOS 7.4 or RHEL 7) must be loaded * The deployment flavor must be available to your user - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing - look at @@ -183,9 +183,14 @@ Then run the provision + install playbook -- this will create the OpenStack resources: ```bash -$ ansible-playbook --user openshift -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml +$ ansible-playbook --user openshift -i inventory \ + openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \ + -e openshift_repos_enable_testing=true ``` +Note, you may want to use the testing repo for development purposes only. +Normally, `openshift_repos_enable_testing` should not be specified. + If you're using multiple inventories, make sure you pass the path to the right one to `-i`. @@ -210,7 +215,6 @@ advanced configuration: * [External Dns][external-dns] * Multiple Clusters (TODO) * [Cinder Registry][cinder-registry] -* [Bastion Node][bastion] [ansible]: https://www.ansible.com/ @@ -229,4 +233,3 @@ advanced configuration: [loadbalancer]: ./advanced-configuration.md#multi-master-configuration [external-dns]: ./advanced-configuration.md#dns-configuration-variables [cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry -[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md index c0bdf5020..f22243fbd 100644 --- a/playbooks/openstack/advanced-configuration.md +++ b/playbooks/openstack/advanced-configuration.md @@ -328,14 +328,6 @@ The `openshift_openstack_required_packages` variable also provides a list of the prerequisite packages to be installed before to deploy an OpenShift cluster. Those are ignored though, if the `manage_packages: False`. -The `openstack_inventory` controls either a static inventory will be created after the -cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory -is yet to be supported, so the static inventory will be created anyway. - -The `openstack_inventory_path` points the directory to host the generated static inventory. -It should point to the copied example inventory directory, otherwise ti creates -a new one for you. 
- ## Multi-master configuration Please refer to the official documentation for the @@ -345,7 +337,6 @@ variables](https://docs.openshift.com/container-platform/3.6/install_config/inst in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node under the ansible group named `ext_lb`: - openshift_master_cluster_method: native openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}" openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}" @@ -538,43 +529,6 @@ You can also run the registry setup playbook directly: -## Configure static inventory and access via a bastion node - -Example inventory variables: - - openshift_openstack_use_bastion: true - openshift_openstack_bastion_ingress_cidr: "{{openshift_openstack_subnet_prefix}}.0/24" - openstack_private_ssh_key: ~/.ssh/id_rsa - openstack_inventory: static - openstack_inventory_path: ../../../../inventory - openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com - -The `openshift_openstack_subnet_prefix` is the openstack private network for your cluster. -And the `openshift_openstack_bastion_ingress_cidr` defines accepted range for SSH connections to nodes -additionally to the `openshift_openstack_ssh_ingress_cidr`` (see the security notes above). - -The SSH config will be stored on the ansible control node by the -gitven path. Ansible uses it automatically. To access the cluster nodes with -that ssh config, use the `-F` prefix, f.e.: - - ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK - -Note, relative paths will not work for the `openstack_ssh_config_path`, but it -works for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this -guide, the latter points to the current directory, where you run ansible commands -from. - -To verify nodes connectivity, use the command: - - ansible -v -i inventory/hosts -m ping all - -If something is broken, double-check the inventory variables, paths and the -generated `<openstack_inventory_path>/hosts` and `openstack_ssh_config_path` files. - -The `inventory: dynamic` can be used instead to access cluster nodes directly via -floating IPs. In this mode you can not use a bastion node and should specify -the dynamic inventory file in your ansible commands , like `-i openstack.py`. - ## Using Docker on the Ansible host If you don't want to worry about the dependencies, you can use the @@ -604,28 +558,6 @@ the playbooks: ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml -### Run the playbook - -Assuming your OpenStack (Keystone) credentials are in the `keystonerc` -this is how you stat the provisioning process from your ansible control node: - - . keystonerc - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml - -Note, here you start with an empty inventory. The static inventory will be populated -with data so you can omit providing additional arguments for future ansible commands. - -If bastion enabled, the generates SSH config must be applied for ansible. -Otherwise, it is auto included by the previous step. In order to execute it -as a separate playbook, use the following command: - - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml - -The first infra node then becomes a bastion node as well and proxies access -for future ansible commands. 
The post-provision step also configures Satellite, -if requested, and DNS server, and ensures other OpenShift requirements to be met. - - ## Running Custom Post-Provision Actions A custom playbook can be run like this: @@ -733,21 +665,6 @@ Once it succeeds, you can install openshift by running: OpenShift UI may be accessed via the 1st master node FQDN, port 8443. -When using a bastion, you may want to make an SSH tunnel from your control node -to access UI on the `https://localhost:8443`, with this inventory variable: - - openshift_openstack_ui_ssh_tunnel: True - -Note, this requires sudo rights on the ansible control node and an absolute path -for the `openstack_private_ssh_key`. You should also update the control node's -`/etc/hosts`: - - 127.0.0.1 master-0.openshift.example.com - -In order to access UI, the ssh-tunnel service will be created and started on the -control node. Make sure to remove these changes and the service manually, when not -needed anymore. - ## Scale Deployment up/down ### Scaling up @@ -766,5 +683,3 @@ Usage: ``` ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>] ``` - -Note: This playbook works only without a bastion node (`openshift_openstack_use_bastion: False`). diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml index 90608bbc0..933117127 100644 --- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -1,10 +1,11 @@ --- +## OpenShift product versions and repos to install from openshift_deployment_type: origin +#openshift_repos_enable_testing: true #openshift_deployment_type: openshift-enterprise #openshift_release: v3.5 openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" -openshift_master_cluster_method: native openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" osm_default_node_selector: 'region=primary' diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml index 582dfe794..7dd59c5d8 100644 --- a/playbooks/prerequisites.yml +++ b/playbooks/prerequisites.yml @@ -1,7 +1,12 @@ --- -- name: Place holder for prerequisites - hosts: localhost - gather_facts: false +- include: init/main.yml + vars: + skip_version: True + +- hosts: "{{ l_containerized_host_groups }}" + vars: + l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}" + l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}" tasks: - - name: Debug placeholder - debug: msg="Prerequisites ran." + - include_role: + name: container_runtime diff --git a/playbooks/roles b/playbooks/roles new file mode 120000 index 000000000..d8c4472ca --- /dev/null +++ b/playbooks/roles @@ -0,0 +1 @@ +../roles
\ No newline at end of file diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml index 67fc0065f..9cc0604a3 100644 --- a/roles/calico/handlers/main.yml +++ b/roles/calico/handlers/main.yml @@ -3,10 +3,10 @@ become: yes systemd: name=calico state=restarted -- name: restart docker +- name: restart container runtime become: yes systemd: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: restarted register: l_docker_restart_docker_in_calico_result until: not l_docker_restart_docker_in_calico_result | failed diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2 index 7653e19b1..a7809b9f9 100644 --- a/roles/calico/templates/calico.service.j2 +++ b/roles/calico/templates/calico.service.j2 @@ -1,7 +1,7 @@ [Unit] Description=calico -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service [Service] Restart=always diff --git a/roles/docker/README.md b/roles/container_runtime/README.md index 19908c036..e363c1714 100644 --- a/roles/docker/README.md +++ b/roles/container_runtime/README.md @@ -10,27 +10,23 @@ Requirements Ansible 2.2 -Role Variables +Mandatory Role Variables -------------- -docker_conf_dir: location of the Docker configuration directory -docker_systemd_dir location of the systemd directory for Docker -docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446) -udevw_udevd_dir: location of systemd config for systemd-udevd.service + Dependencies ------------ -Depends on the os_firewall role. +Depends on openshift_facts having already been run. Example Playbook ---------------- - hosts: servers roles: - - role: docker + - role: container_runtime docker_udev_workaround: "true" - docker_use_system_container: False License ------- diff --git a/roles/docker/defaults/main.yml b/roles/container_runtime/defaults/main.yml index 224844a06..62b3e141a 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/container_runtime/defaults/main.yml @@ -2,18 +2,34 @@ docker_cli_auth_config_path: '/root/.docker' openshift_docker_signature_verification: False +repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" + openshift_docker_alternative_creds: False # oreg_url is defined by user input. oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" oreg_auth_credentials_replace: False +openshift_docker_use_system_container: False +openshift_docker_disable_push_dockerhub: False # bool +openshift_docker_selinux_enabled: True +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" + +openshift_docker_hosted_registry_insecure: False # bool + +openshift_docker_hosted_registry_network_default: "{{ openshift_portal_net | default(False) }}" +openshift_docker_hosted_registry_network: "{{ openshift_docker_hosted_registry_network_default }}" + openshift_docker_additional_registries: [] openshift_docker_blocked_registries: [] openshift_docker_insecure_registries: [] openshift_docker_ent_reg: 'registry.access.redhat.com' +openshift_docker_options: False # str +openshift_docker_log_driver: False # str +openshift_docker_log_options: [] + # The l2_docker_* variables convert csv strings to lists, if # necessary.
These variables should be used in place of their respective # openshift_docker_* counterparts to ensure the properly formatted lists are @@ -21,6 +37,7 @@ openshift_docker_ent_reg: 'registry.access.redhat.com' l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}" l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}" l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}" +l2_docker_log_options: "{% if openshift_docker_log_options is string %}{% if ',' in openshift_docker_log_options %}{{ openshift_docker_log_options.split(',') | list }}{% else %}{{ [ openshift_docker_log_options ] }}{% endif %}{% else %}{{ openshift_docker_log_options }}{% endif %}" openshift_docker_use_etc_containers: False containers_registries_conf_path: /etc/containers/registries.conf @@ -38,3 +55,26 @@ openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['o docker_alt_storage_path: /var/lib/containers/docker docker_default_storage_path: /var/lib/docker + +# Set local versions of facts that must be in json format for container-daemon.json +# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson +l_docker_log_options: "{{ l2_docker_log_options | to_json }}" +l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}" +l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}" +l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}" +l_docker_selinux_enabled: "{{ openshift_docker_selinux_enabled | to_json }}" + +docker_http_proxy: "{{ openshift.common.http_proxy | default('') }}" +docker_https_proxy: "{{ openshift.common.https_proxy | default('') }}" +docker_no_proxy: "{{ openshift.common.no_proxy | default('') }}" + +openshift_use_crio: False +openshift_use_crio_only: False + + +l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" +l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" +l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" + +l_openshift_image_tag_default: "{{ openshift_release }}" +l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}" diff --git a/roles/docker/handlers/main.yml b/roles/container_runtime/handlers/main.yml index 866ed0452..67cd6d782 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/container_runtime/handlers/main.yml @@ -1,8 +1,8 @@ --- -- name: restart docker systemd: - name: "{{ openshift.docker.service_name }}" +
name: "{{ openshift_docker_service_name }}" state: restarted daemon_reload: yes register: r_docker_restart_docker_result diff --git a/roles/docker/meta/main.yml b/roles/container_runtime/meta/main.yml index d5faae8df..02fceb745 100644 --- a/roles/docker/meta/main.yml +++ b/roles/container_runtime/meta/main.yml @@ -1,7 +1,7 @@ --- galaxy_info: author: OpenShift - description: docker package install + description: container runtime install and configure company: Red Hat, Inc license: ASL 2.0 min_ansible_version: 2.2 diff --git a/roles/docker/tasks/crio_firewall.yml b/roles/container_runtime/tasks/crio_firewall.yml index fbd1ff515..fbd1ff515 100644 --- a/roles/docker/tasks/crio_firewall.yml +++ b/roles/container_runtime/tasks/crio_firewall.yml diff --git a/roles/docker/tasks/main.yml b/roles/container_runtime/tasks/main.yml index b02a74711..6d68082b1 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/container_runtime/tasks/main.yml @@ -1,15 +1,7 @@ --- -# These tasks dispatch to the proper set of docker tasks based on the -# inventory:openshift_docker_use_system_container variable - - include_tasks: udev_workaround.yml when: docker_udev_workaround | default(False) | bool -- set_fact: - l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}" - l_use_crio: "{{ openshift_use_crio | default(False) }}" - l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}" - - name: Add enterprise registry, if necessary set_fact: l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}" @@ -17,13 +9,13 @@ - openshift.common.deployment_type == 'openshift-enterprise' - openshift_docker_ent_reg != '' - openshift_docker_ent_reg not in l2_docker_additional_registries - - not l_use_crio_only + - not openshift_use_crio_only | bool - name: Use Package Docker if Requested include_tasks: package_docker.yml when: - - not l_use_system_container - - not l_use_crio_only + - not openshift_docker_use_system_container + - not openshift_use_crio_only - name: Ensure /var/lib/containers exists file: @@ -37,13 +29,13 @@ - name: Use System Container Docker if Requested include_tasks: systemcontainer_docker.yml when: - - l_use_system_container - - not l_use_crio_only + - openshift_docker_use_system_container + - not openshift_use_crio_only - name: Add CRI-O usage Requested include_tasks: systemcontainer_crio.yml when: - - l_use_crio + - openshift_use_crio - openshift_docker_is_node_or_master | bool - name: stat the docker data dir @@ -52,13 +44,13 @@ register: dockerstat - when: - - l_use_crio + - openshift_use_crio - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool) block: - name: stop the current running docker systemd: state: stopped - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}" command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" @@ -90,4 +82,4 @@ - name: start docker systemd: state: started - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" diff --git a/roles/docker/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml index 044b04478..40ab75a25 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/container_runtime/tasks/package_docker.yml @@ -52,7 +52,7 @@ dest: "{{ docker_systemd_dir }}/custom.conf" src: custom.conf.j2 notify: - - restart docker + - restart container runtime when: not 
(os_firewall_use_firewalld | default(False)) | bool - stat: path=/etc/sysconfig/docker @@ -78,7 +78,7 @@ reg_fact_val: "{{ l2_docker_insecure_registries }}" reg_flag: --insecure-registry notify: - - restart docker + - restart container runtime - name: Place additional/blocked/insecure registries in /etc/containers/registries.conf template: @@ -86,7 +86,7 @@ src: registries.conf when: openshift_docker_use_etc_containers | bool notify: - - restart docker + - restart container runtime - name: Set Proxy Settings lineinfile: @@ -96,30 +96,34 @@ state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}" with_items: - reg_conf_var: HTTP_PROXY - reg_fact_val: "{{ docker_http_proxy | default('') }}" + reg_fact_val: "{{ docker_http_proxy }}" - reg_conf_var: HTTPS_PROXY - reg_fact_val: "{{ docker_https_proxy | default('') }}" + reg_fact_val: "{{ docker_https_proxy }}" - reg_conf_var: NO_PROXY - reg_fact_val: "{{ docker_no_proxy | default('') }}" + reg_fact_val: "{{ docker_no_proxy }}" notify: - - restart docker + - restart container runtime when: - - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common' + - docker_check.stat.isreg is defined + - docker_check.stat.isreg + - docker_http_proxy != '' or docker_https_proxy != '' - name: Set various Docker options lineinfile: dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*$' line: "OPTIONS='\ - {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \ - {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %} \ - {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \ + {% if ansible_selinux.status | default(None) == 'enabled' and openshift_docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \ + {% if openshift_docker_log_driver | bool %} --log-driver {{ openshift_docker_log_driver }}{% endif %} \ + {% if l2_docker_log_options != [] %} {{ l2_docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \ + {% if openshift_docker_hosted_registry_insecure and (openshift_docker_hosted_registry_network | bool) %} --insecure-registry={{ openshift_docker_hosted_registry_network }} {% endif %} \ {% if docker_options is defined %} {{ docker_options }}{% endif %} \ - {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %} \ + {% if openshift_docker_options %} {{ openshift_docker_options }}{% endif %} \ + {% if openshift_docker_disable_push_dockerhub %} --confirm-def-push={{ openshift_docker_disable_push_dockerhub | bool }}{% endif %} \ --signature-verification={{ openshift_docker_signature_verification | bool }}'" when: docker_check.stat.isreg is defined and docker_check.stat.isreg notify: - - restart docker + - restart container runtime - stat: path=/etc/sysconfig/docker-network register: sysconfig_docker_network_check @@ -134,7 +138,7 @@ - sysconfig_docker_network_check.stat.isreg is defined - sysconfig_docker_network_check.stat.isreg notify: - - restart docker + - restart container runtime # The following task is needed as the systemd module may report a change in # state even though docker is already running. 
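For orientation: the `l2_docker_*` expressions used by `package_docker.yml` above (and defined in `roles/container_runtime/defaults/main.yml`) normalize inventory input that may arrive either as a csv string or as a list. A minimal, self-contained sketch of that normalization (the playbook and the registry values are hypothetical; only the `l2_docker_insecure_registries` expression is copied from the commit):

```yaml
# Hypothetical demo playbook, not part of the repository.
- hosts: localhost
  gather_facts: false
  vars:
    # csv-string input, as a user might set it in an inventory:
    openshift_docker_insecure_registries: "172.30.0.0/16,registry.example.com"
    # verbatim expression from roles/container_runtime/defaults/main.yml:
    l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
  tasks:
    - debug:
        # renders as the list ['172.30.0.0/16', 'registry.example.com']
        msg: "{{ l2_docker_insecure_registries }}"
```

The resulting list feeds both the `--insecure-registry` lines written by the "Place additional/blocked/insecure registries" task above and, via `| to_json`, the `l_docker_insecure_registries` fact consumed by the system-container templates.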
diff --git a/roles/docker/tasks/registry_auth.yml b/roles/container_runtime/tasks/registry_auth.yml index 2c7bc5711..2c7bc5711 100644 --- a/roles/docker/tasks/registry_auth.yml +++ b/roles/container_runtime/tasks/registry_auth.yml diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml index 3439aa353..8dcfe60ef 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/container_runtime/tasks/systemcontainer_crio.yml @@ -1,28 +1,5 @@ --- - # TODO: Much of this file is shared with container engine tasks -- set_fact: - l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" -- set_fact: - l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" -- set_fact: - l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" - -- set_fact: - l_openshift_image_tag: "{{ openshift_image_tag | string }}" - when: openshift_image_tag is defined - -- set_fact: - l_openshift_image_tag: "latest" - when: - - openshift_image_tag is not defined - - openshift_release == "latest" - -- set_fact: - l_openshift_image_tag: "{{ openshift_release | string }}" - when: - - openshift_image_tag is not defined - - openshift_release != "latest" - name: Ensure container-selinux is installed package: @@ -184,4 +161,4 @@ # 'docker login' - include_tasks: registry_auth.yml vars: - openshift_docker_alternative_creds: "{{ l_use_crio_only }}" + openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}" diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml index 881d83f50..84217e50c 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/container_runtime/tasks/systemcontainer_docker.yml @@ -1,28 +1,10 @@ --- - -- set_fact: - l_openshift_image_tag: "{{ openshift_image_tag | string }}" - when: openshift_image_tag is defined - -- set_fact: - l_openshift_image_tag: "latest" - when: - - openshift_image_tag is not defined - - openshift_release == "latest" - -- set_fact: - l_openshift_image_tag: "{{ openshift_release | string }}" - when: - - openshift_image_tag is not defined - - openshift_release != "latest" - # If docker_options are provided we should fail. We should not install docker and ignore # the users configuration. NOTE: docker_options == inventory:openshift_docker_options - name: Fail quickly if openshift_docker_options are set assert: that: - - docker_options is defined - - docker_options != "" + - "{% if not openshift_docker_options %}1{% else %}0{% endif %}" msg: | Docker via System Container does not allow for the use of the openshift_docker_options variable. 
If you want to use openshift_docker_options you will need to use the @@ -106,7 +88,7 @@ - name: Set the full image name set_fact: - l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:{{ l_docker_image_tag }}" + l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}" # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959 - name: Use a specific image if requested @@ -125,7 +107,7 @@ command: "atomic pull --storage ostree {{ l_docker_image }}" changed_when: false environment: - NO_PROXY: "{{ openshift.common.no_proxy | default('') }}" + NO_PROXY: "{{ docker_no_proxy }}" - name: Ensure container-engine.service.d directory exists @@ -140,7 +122,7 @@ - name: Install Container Engine System Container oc_atomic_container: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" image: "{{ l_docker_image }}" state: latest @@ -149,15 +131,6 @@ dest: "{{ container_engine_systemd_dir }}/custom.conf" src: systemcontainercustom.conf.j2 -# Set local versions of facts that must be in json format for container-daemon.json -# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson -- set_fact: - l_docker_insecure_registries: "{{ l2_docker_insecure_registries | default([]) | to_json }}" - l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}" - l_docker_additional_registries: "{{ l2_docker_additional_registries | default([]) | to_json }}" - l_docker_blocked_registries: "{{ l2_docker_blocked_registries | default([]) | to_json }}" - l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}" - # Configure container-engine using the container-daemon.json file # NOTE: daemon.json and container-daemon.json have been seperated to avoid # collision. 
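For context on the surrounding systemcontainer_docker.yml hunks: the role assembles the container-engine image reference from a registry prefix, the service name, and a tag, then pulls it into ostree storage before installing it via oc_atomic_container. A worked sketch of that expansion; l_docker_image_prepend and l_docker_image_tag are computed elsewhere in the same file, and the concrete values below are assumptions for illustration only:

    # Illustrative expansion of the "Set the full image name" task
    # l_docker_image_prepend: "registry.access.redhat.com/openshift3"  (assumed)
    # openshift_docker_service_name: "container-engine"                (system-container case)
    # l_docker_image_tag: "latest"                                     (assumed)
    l_docker_image: "registry.access.redhat.com/openshift3/container-engine:latest"
    # which the role then pulls with:
    #   atomic pull --storage ostree registry.access.redhat.com/openshift3/container-engine:latest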
@@ -169,7 +142,7 @@ # Enable and start the container-engine service - name: Start the Container Engine service systemd: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" enabled: yes state: started daemon_reload: yes diff --git a/roles/docker/tasks/udev_workaround.yml b/roles/container_runtime/tasks/udev_workaround.yml index 257c3123d..257c3123d 100644 --- a/roles/docker/tasks/udev_workaround.yml +++ b/roles/container_runtime/tasks/udev_workaround.yml diff --git a/roles/docker/templates/80-openshift-sdn.conf.j2 b/roles/container_runtime/templates/80-openshift-sdn.conf.j2 index a693aea5f..a693aea5f 100644 --- a/roles/docker/templates/80-openshift-sdn.conf.j2 +++ b/roles/container_runtime/templates/80-openshift-sdn.conf.j2 diff --git a/roles/docker/templates/crio.conf.j2 b/roles/container_runtime/templates/crio.conf.j2 index 3f066a17f..3f066a17f 100644 --- a/roles/docker/templates/crio.conf.j2 +++ b/roles/container_runtime/templates/crio.conf.j2 diff --git a/roles/docker/templates/custom.conf.j2 b/roles/container_runtime/templates/custom.conf.j2 index 713412473..713412473 100644 --- a/roles/docker/templates/custom.conf.j2 +++ b/roles/container_runtime/templates/custom.conf.j2 diff --git a/roles/docker/templates/daemon.json b/roles/container_runtime/templates/daemon.json index a41b7cdbd..383963bd3 100644 --- a/roles/docker/templates/daemon.json +++ b/roles/container_runtime/templates/daemon.json @@ -5,8 +5,8 @@ "disable-legacy-registry": false, "exec-opts": ["native.cgroupdriver=systemd"], "insecure-registries": {{ l_docker_insecure_registries }}, -{% if docker_log_driver is defined %} - "log-driver": "{{ docker_log_driver }}", +{% if openshift_docker_log_driver is defined %} + "log-driver": "{{ openshift_docker_log_driver }}", {%- endif %} "log-opts": {{ l_docker_log_options }}, "runtimes": { diff --git a/roles/docker/templates/overlay.conf.j2 b/roles/container_runtime/templates/overlay.conf.j2 index 782f46c2e..782f46c2e 100644 --- a/roles/docker/templates/overlay.conf.j2 +++ b/roles/container_runtime/templates/overlay.conf.j2 diff --git a/roles/docker/templates/registries.conf b/roles/container_runtime/templates/registries.conf index d379b2be0..d379b2be0 100644 --- a/roles/docker/templates/registries.conf +++ b/roles/container_runtime/templates/registries.conf diff --git a/roles/docker/templates/systemcontainercustom.conf.j2 b/roles/container_runtime/templates/systemcontainercustom.conf.j2 index 86eebfba6..86eebfba6 100644 --- a/roles/docker/templates/systemcontainercustom.conf.j2 +++ b/roles/container_runtime/templates/systemcontainercustom.conf.j2 diff --git a/roles/docker/vars/main.yml b/roles/container_runtime/vars/main.yml index 4e940b7f5..4e940b7f5 100644 --- a/roles/docker/vars/main.yml +++ b/roles/container_runtime/vars/main.yml diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml index b5d2f7c6e..aa976d921 100644 --- a/roles/contiv/defaults/main.yml +++ b/roles/contiv/defaults/main.yml @@ -119,3 +119,5 @@ contiv_h1_gw_default: "10.129.0.1" # contiv default private subnet for ext access contiv_private_ext_subnet: "10.130.0.0/16" + +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml index 0b2f91bab..cf92a8cc0 100644 --- a/roles/contiv/tasks/netplugin.yml +++ b/roles/contiv/tasks/netplugin.yml @@ -105,7 +105,7 @@ - name: Docker | Restart docker service: - name: 
"{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: restarted when: docker_updated|changed register: l_docker_restart_docker_in_contiv_result diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service index 4506d2231..90bb98001 100644 --- a/roles/contiv/templates/aci-gw.service +++ b/roles/contiv/templates/aci-gw.service @@ -1,6 +1,6 @@ [Unit] Description=Contiv ACI gw -After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift.docker.service_name }}.service +After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift_docker_service_name }}.service [Service] ExecStart={{ bin_dir }}/aci_gw.sh start diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index a069e4d87..3038ed9f6 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -97,3 +97,5 @@ r_etcd_os_firewall_allow: # set the backend quota to 4GB by default etcd_quota_backend_bytes: 4294967296 + +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index 99ae37319..4c25a9955 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -1,8 +1,8 @@ [Unit] Description=The Etcd Server container -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service [Service] EnvironmentFile={{ etcd_conf_file }} @@ -14,4 +14,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml index 988731ef2..488b6b0bc 100644 --- a/roles/flannel/defaults/main.yaml +++ b/roles/flannel/defaults/main.yaml @@ -5,3 +5,5 @@ etcd_hosts: "{{ etcd_urls }}" etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt" etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt" etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key" + +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 889069485..80e4d391d 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -6,7 +6,7 @@ - name: restart docker become: yes systemd: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: restarted register: l_docker_restart_docker_in_flannel_result until: not l_docker_restart_docker_in_flannel_result | failed diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index 3cb1fa8d0..83ca83350 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ 
b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -86,7 +86,7 @@ class CallbackModule(CallbackBase): }, 'installer_phase_logging': { 'title': 'Logging Install', - 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml' + 'playbook': 'playbooks/openshift-logging/config.yml' }, 'installer_phase_prometheus': { 'title': 'Prometheus Install', diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index 410b739e9..cb83c8ead 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -3,8 +3,7 @@ systemd: name={{ openshift.common.service_type }}-master-api state=restarted when: > (openshift_master_ha | bool) and - (not master_api_service_status_changed | default(false)) and - openshift.master.cluster_method == 'native' + (not master_api_service_status_changed | default(false)) # TODO: need to fix up ignore_errors here # We retry the controllers because the API may not be 100% initialized yet. @@ -16,6 +15,5 @@ until: result.rc == 0 when: > (openshift_master_ha | bool) and - (not master_controllers_service_status_changed | default(false)) and - openshift.master.cluster_method == 'native' + (not master_controllers_service_status_changed | default(false)) ignore_errors: yes diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml index 82da0639e..631a0455e 100644 --- a/roles/openshift_cli/defaults/main.yml +++ b/roles/openshift_cli/defaults/main.yml @@ -4,3 +4,8 @@ system_images_registry_dict: origin: "docker.io" system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}" + +openshift_use_crio_only: False + +l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(False)) | bool }}" +l_use_cli_atomic_image: "{{ openshift_use_crio_only or l_is_system_container_image }}" diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml index 29ed82783..5d2b6abed 100644 --- a/roles/openshift_cli/meta/main.yml +++ b/roles/openshift_cli/meta/main.yml @@ -12,6 +12,4 @@ galaxy_info: categories: - cloud dependencies: -- role: openshift_docker - when: not skip_docker_role | default(False) | bool - role: openshift_facts diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 7b046b2c4..140c6ea26 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -1,10 +1,4 @@ --- -- set_fact: - l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}" - l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool }}" -- set_fact: - l_use_cli_atomic_image: "{{ l_use_crio_only or l_is_system_container_image }}" - - name: Install clients package: name={{ openshift.common.service_type }}-clients state=present when: not openshift.common.is_containerized | bool diff --git a/roles/openshift_docker/defaults/main.yml b/roles/openshift_docker/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_docker/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_docker/meta/main.yml b/roles/openshift_docker/meta/main.yml deleted file mode 100644 index 60efd4e45..000000000 --- a/roles/openshift_docker/meta/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: OpenShift Docker - company: Red Hat, Inc. 
- license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: openshift_docker_facts -- role: docker diff --git a/roles/openshift_docker/tasks/main.yml b/roles/openshift_docker/tasks/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_docker/tasks/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_docker_facts/defaults/main.yml b/roles/openshift_docker_facts/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_docker_facts/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_docker_facts/meta/main.yml b/roles/openshift_docker_facts/meta/main.yml deleted file mode 100644 index 5b1be7a8d..000000000 --- a/roles/openshift_docker_facts/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: OpenShift Docker Facts - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- { role: openshift_facts } diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml deleted file mode 100644 index 5a3e50678..000000000 --- a/roles/openshift_docker_facts/tasks/main.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Set docker facts - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: docker - local_facts: - selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}" - log_driver: "{{ openshift_docker_log_driver | default(None) }}" - log_options: "{{ openshift_docker_log_options | default(None) }}" - options: "{{ openshift_docker_options | default(None) }}" - disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}" - hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}" - hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}" - use_system_container: "{{ openshift_docker_use_system_container | default(False) }}" - use_crio: "{{ openshift_use_crio | default(False) }}" - - role: node - local_facts: - sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - -- set_fact: - docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}" - docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}" - docker_log_options: "{{ openshift.docker.log_options | default(omit) }}" - docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub - | default(omit) }}" - docker_http_proxy: "{{ openshift.common.http_proxy | default(omit) }}" - docker_https_proxy: "{{ openshift.common.https_proxy | default(omit) }}" - docker_no_proxy: "{{ openshift.common.no_proxy | default(omit) }}" - -- set_fact: - docker_options: "--insecure-registry={{ openshift.docker.hosted_registry_network }} {{ openshift.docker.options | default ('') }}" - when: openshift.docker.hosted_registry_insecure | default(False) | bool and openshift.docker.hosted_registry_network is defined - register: hosted_registry_options - -- set_fact: - docker_options: "{{ openshift.docker.options | default(omit) }}" - when: hosted_registry_options | skipped diff --git a/roles/openshift_docker_facts/vars/main.yml b/roles/openshift_docker_facts/vars/main.yml deleted file mode 100644 index 55c04b0c1..000000000 --- 
a/roles/openshift_docker_facts/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml index 7cc548f69..0e28fec03 100644 --- a/roles/openshift_etcd/meta/main.yml +++ b/roles/openshift_etcd/meta/main.yml @@ -13,6 +13,4 @@ galaxy_info: - cloud dependencies: - role: openshift_etcd_facts -- role: openshift_docker - when: openshift.common.is_containerized | bool - role: etcd diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index 7064d727a..a182d23c5 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -3,4 +3,98 @@ openshift_cli_image_dict: origin: 'openshift/origin' openshift-enterprise: 'openshift3/ose' +openshift_hosted_images_dict: + origin: 'openshift/origin-${component}:${version}' + openshift-enterprise: 'openshift3/ose-${component}:${version}' + openshift_cli_image: "{{ osm_image | default(openshift_cli_image_dict[openshift_deployment_type]) }}" + +# osm_default_subdomain is an old migrated fact, can probably be removed. +osm_default_subdomain: "router.default.svc.cluster.local" +openshift_master_default_subdomain: "{{ osm_default_subdomain }}" + +openshift_hosted_etcd_storage_nfs_directory: '/exports' +openshift_hosted_etcd_storage_nfs_options: '*(rw,root_squash)' +openshift_hosted_etcd_storage_volume_name: 'etcd' +openshift_hosted_etcd_storage_volume_size: '1Gi' +openshift_hosted_etcd_storage_create_pv: True +openshift_hosted_etcd_storage_create_pvc: False +openshift_hosted_etcd_storage_access_modes: + - 'ReadWriteOnce' + +openshift_hosted_registry_namespace: 'default' +openshift_hosted_registry_storage_volume_name: 'registry' +openshift_hosted_registry_storage_volume_size: '5Gi' +openshift_hosted_registry_storage_create_pv: True +openshift_hosted_registry_storage_create_pvc: True +openshift_hosted_registry_storage_nfs_directory: '/exports' +openshift_hosted_registry_storage_nfs_options: '*(rw,root_squash)' +openshift_hosted_registry_storage_glusterfs_endpoints: 'glusterfs-registry-endpoints' +openshift_hosted_registry_storage_glusterfs_path: glusterfs-registry-volume +openshift_hosted_registry_storage_glusterfs_readOnly: False +openshift_hosted_registry_storage_glusterfs_swap: False +openshift_hosted_registry_storage_glusterfs_swapcopy: True +openshift_hosted_registry_storage_glusterfs_ips: [] +openshift_hosted_registry_storage_access_modes: + - 'ReadWriteMany' + +openshift_logging_storage_nfs_directory: '/exports' +openshift_logging_storage_nfs_options: '*(rw,root_squash)' +openshift_logging_storage_volume_name: 'logging-es' +openshift_logging_storage_create_pv: True +openshift_logging_storage_create_pvc: False +openshift_logging_storage_access_modes: + - 'ReadWriteOnce' + +openshift_loggingops_storage_volume_name: 'logging-es-ops' +openshift_loggingops_storage_volume_size: '10Gi' +openshift_loggingops_storage_create_pv: True +openshift_loggingops_storage_create_pvc: False +openshift_loggingops_storage_nfs_directory: '/exports' +openshift_loggingops_storage_nfs_options: '*(rw,root_squash)' +openshift_loggingops_storage_access_modes: + - 'ReadWriteOnce' + +openshift_metrics_deploy: False +openshift_metrics_duration: 7 +openshift_metrics_resolution: '10s' +openshift_metrics_storage_volume_name: 'metrics' +openshift_metrics_storage_volume_size: '10Gi' +openshift_metrics_storage_create_pv: True
+openshift_metrics_storage_create_pvc: False +openshift_metrics_storage_nfs_directory: '/exports' +openshift_metrics_storage_nfs_options: '*(rw,root_squash)' +openshift_metrics_storage_access_modes: + - 'ReadWriteOnce' + +openshift_prometheus_storage_volume_name: 'prometheus' +openshift_prometheus_storage_volume_size: '10Gi' +openshift_prometheus_storage_nfs_directory: '/exports' +openshift_prometheus_storage_nfs_options: '*(rw,root_squash)' +openshift_prometheus_storage_access_modes: + - 'ReadWriteOnce' +openshift_prometheus_storage_create_pv: True +openshift_prometheus_storage_create_pvc: False + +openshift_prometheus_alertmanager_storage_volume_name: 'prometheus-alertmanager' +openshift_prometheus_alertmanager_storage_volume_size: '10Gi' +openshift_prometheus_alertmanager_storage_nfs_directory: '/exports' +openshift_prometheus_alertmanager_storage_nfs_options: '*(rw,root_squash)' +openshift_prometheus_alertmanager_storage_access_modes: + - 'ReadWriteOnce' +openshift_prometheus_alertmanager_storage_create_pv: True +openshift_prometheus_alertmanager_storage_create_pvc: False + +openshift_prometheus_alertbuffer_storage_volume_name: 'prometheus-alertbuffer' +openshift_prometheus_alertbuffer_storage_volume_size: '10Gi' +openshift_prometheus_alertbuffer_storage_nfs_directory: '/exports' +openshift_prometheus_alertbuffer_storage_nfs_options: '*(rw,root_squash)' +openshift_prometheus_alertbuffer_storage_access_modes: + - 'ReadWriteOnce' +openshift_prometheus_alertbuffer_storage_create_pv: True +openshift_prometheus_alertbuffer_storage_create_pvc: False + + +openshift_router_selector: "region=infra" +openshift_hosted_router_selector: "{{ openshift_router_selector }}" +openshift_hosted_registry_selector: "{{ openshift_router_selector }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index a5d90d602..b371d347c 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -11,14 +11,13 @@ import copy import errno import json import re -import io import os import yaml import struct import socket from distutils.util import strtobool from distutils.version import LooseVersion -from ansible.module_utils.six import string_types, text_type +from ansible.module_utils.six import string_types from ansible.module_utils.six.moves import configparser # ignore pylint errors related to the module_utils import @@ -51,39 +50,6 @@ EXAMPLES = ''' ''' -def migrate_docker_facts(facts): - """ Apply migrations for docker facts """ - params = { - 'common': ( - 'options' - ), - 'node': ( - 'log_driver', - 'log_options' - ) - } - if 'docker' not in facts: - facts['docker'] = {} - # pylint: disable=consider-iterating-dictionary - for role in params.keys(): - if role in facts: - for param in params[role]: - old_param = 'docker_' + param - if old_param in facts[role]: - facts['docker'][param] = facts[role].pop(old_param) - - if 'node' in facts and 'portal_net' in facts['node']: - facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net') - - # log_options was originally meant to be a comma separated string, but - # we now prefer an actual list, with backward compatibility: - if 'log_options' in facts['docker'] and \ - isinstance(facts['docker']['log_options'], string_types): - facts['docker']['log_options'] = facts['docker']['log_options'].split(",") - - return facts - - # TODO: We should add a generic migration function that takes source and destination # paths and does the right 
thing rather than one function for common, one for node, etc. def migrate_common_facts(facts): @@ -119,24 +85,6 @@ def migrate_node_facts(facts): return facts -def migrate_hosted_facts(facts): - """ Apply migrations for master facts """ - if 'master' in facts: - if 'router_selector' in facts['master']: - if 'hosted' not in facts: - facts['hosted'] = {} - if 'router' not in facts['hosted']: - facts['hosted']['router'] = {} - facts['hosted']['router']['selector'] = facts['master'].pop('router_selector') - if 'registry_selector' in facts['master']: - if 'hosted' not in facts: - facts['hosted'] = {} - if 'registry' not in facts['hosted']: - facts['hosted']['registry'] = {} - facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector') - return facts - - def migrate_admission_plugin_facts(facts): """ Apply migrations for admission plugin facts """ if 'master' in facts: @@ -156,10 +104,8 @@ def migrate_admission_plugin_facts(facts): def migrate_local_facts(facts): """ Apply migrations of local facts """ migrated_facts = copy.deepcopy(facts) - migrated_facts = migrate_docker_facts(migrated_facts) migrated_facts = migrate_common_facts(migrated_facts) migrated_facts = migrate_node_facts(migrated_facts) - migrated_facts = migrate_hosted_facts(migrated_facts) migrated_facts = migrate_admission_plugin_facts(migrated_facts) return migrated_facts @@ -446,58 +392,6 @@ def normalize_provider_facts(provider, metadata): return facts -# pylint: disable=too-many-branches -def set_selectors(facts): - """ Set selectors facts if not already present in facts dict - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the generated selectors - facts if they were not already present - - """ - selector = "region=infra" - - if 'hosted' not in facts: - facts['hosted'] = {} - if 'router' not in facts['hosted']: - facts['hosted']['router'] = {} - if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']: - facts['hosted']['router']['selector'] = selector - if 'registry' not in facts['hosted']: - facts['hosted']['registry'] = {} - if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']: - facts['hosted']['registry']['selector'] = selector - if 'metrics' not in facts['hosted']: - facts['hosted']['metrics'] = {} - if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']: - facts['hosted']['metrics']['selector'] = None - if 'logging' not in facts or not isinstance(facts['logging'], dict): - facts['logging'] = {} - if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']: - facts['logging']['selector'] = None - if 'etcd' not in facts['hosted']: - facts['hosted']['etcd'] = {} - if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']: - facts['hosted']['etcd']['selector'] = None - if 'prometheus' not in facts: - facts['prometheus'] = {} - if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']: - facts['prometheus']['selector'] = None - if 'alertmanager' not in facts['prometheus']: - facts['prometheus']['alertmanager'] = {} - # pylint: disable=line-too-long - if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']: - facts['prometheus']['alertmanager']['selector'] = None - if 'alertbuffer' not in facts['prometheus']: - 
facts['prometheus']['alertbuffer'] = {} - # pylint: disable=line-too-long - if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']: - facts['prometheus']['alertbuffer']['selector'] = None - - return facts - - def set_identity_providers_if_unset(facts): """ Set identity_providers fact if not already present in facts dict @@ -642,60 +536,6 @@ def set_aggregate_facts(facts): return facts -def set_etcd_facts_if_unset(facts): - """ - If using embedded etcd, loads the data directory from master-config.yaml. - - If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf. - - If anything goes wrong parsing these, the fact will not be set. - """ - if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']): - etcd_facts = facts['etcd'] if 'etcd' in facts else dict() - - if 'etcd_data_dir' not in etcd_facts: - try: - # Parse master config to find actual etcd data dir: - master_cfg_path = os.path.join(facts['common']['config_base'], - 'master/master-config.yaml') - master_cfg_f = open(master_cfg_path, 'r') - config = yaml.safe_load(master_cfg_f.read()) - master_cfg_f.close() - - etcd_facts['etcd_data_dir'] = \ - config['etcdConfig']['storageDirectory'] - - facts['etcd'] = etcd_facts - - # We don't want exceptions bubbling up here: - # pylint: disable=broad-except - except Exception: - pass - else: - etcd_facts = facts['etcd'] if 'etcd' in facts else dict() - - # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf: - try: - # Add a fake section for parsing: - ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8') - ini_fp = io.StringIO(ini_str) - config = configparser.RawConfigParser() - config.readfp(ini_fp) - etcd_data_dir = config.get('root', 'ETCD_DATA_DIR') - if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'): - etcd_data_dir = etcd_data_dir[1:-1] - - etcd_facts['etcd_data_dir'] = etcd_data_dir - facts['etcd'] = etcd_facts - - # We don't want exceptions bubbling up here: - # pylint: disable=broad-except - except Exception: - pass - - return facts - - def set_deployment_facts_if_unset(facts): """ Set Facts that vary based on deployment_type. This currently includes common.service_type, master.registry_url, node.registry_url, @@ -1105,6 +945,7 @@ def get_version_output(binary, version_cmd): return output +# We may need this in the future. def get_docker_version_info(): """ Parses and returns the docker version info """ result = None @@ -1118,25 +959,6 @@ def get_docker_version_info(): return result -def get_hosted_registry_insecure(): - """ Parses OPTIONS from /etc/sysconfig/docker to determine if the - registry is currently insecure. - """ - hosted_registry_insecure = None - if os.path.exists('/etc/sysconfig/docker'): - try: - ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8') - ini_fp = io.StringIO(ini_str) - config = configparser.RawConfigParser() - config.readfp(ini_fp) - options = config.get('root', 'OPTIONS') - if 'insecure-registry' in options: - hosted_registry_insecure = True - except Exception: # pylint: disable=broad-except - pass - return hosted_registry_insecure - - def get_openshift_version(facts): """ Get current version of openshift on the host. @@ -1588,13 +1410,6 @@ def set_container_facts_if_unset(facts): deployer_image = 'openshift/origin-deployer' facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') - # If openshift_docker_use_system_container is set and is True .... 
- if 'use_system_container' in list(facts['docker'].keys()): - # use safe_get_bool as the inventory variable may not be a - # valid boolean on it's own. - if safe_get_bool(facts['docker']['use_system_container']): - # ... set the service name to container-engine - facts['docker']['service_name'] = 'container-engine' if 'is_containerized' not in facts['common']: facts['common']['is_containerized'] = facts['common']['is_atomic'] @@ -1689,15 +1504,9 @@ class OpenShiftFacts(object): 'buildoverrides', 'cloudprovider', 'common', - 'docker', 'etcd', - 'hosted', 'master', - 'node', - 'logging', - 'loggingops', - 'metrics', - 'prometheus'] + 'node'] # Disabling too-many-arguments, this should be cleaned up as a TODO item. # pylint: disable=too-many-arguments,no-value-for-parameter @@ -1777,7 +1586,6 @@ class OpenShiftFacts(object): facts = migrate_oauth_template_facts(facts) facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) - facts = set_selectors(facts) facts = set_identity_providers_if_unset(facts) facts = set_deployment_facts_if_unset(facts) facts = set_sdn_facts_if_unset(facts, self.system_facts) @@ -1787,7 +1595,6 @@ class OpenShiftFacts(object): facts = build_api_server_args(facts) facts = set_version_facts_if_unset(facts) facts = set_aggregate_facts(facts) - facts = set_etcd_facts_if_unset(facts) facts = set_proxy_facts(facts) facts = set_builddefaults_facts(facts) facts = set_buildoverrides_facts(facts) @@ -1850,200 +1657,9 @@ class OpenShiftFacts(object): local_quota_per_fsgroup="", set_node_ip=False) - if 'docker' in roles: - docker = dict(disable_push_dockerhub=False, - options='--log-driver=journald') - # NOTE: This is a workaround for a dnf output racecondition that can occur in - # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184 - if self.system_facts['ansible_pkg_mgr'] == 'dnf': - rpm_rebuilddb() - - version_info = get_docker_version_info() - if version_info is not None: - docker['api_version'] = version_info['api_version'] - docker['version'] = version_info['version'] - docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10') - hosted_registry_insecure = get_hosted_registry_insecure() - if hosted_registry_insecure is not None: - docker['hosted_registry_insecure'] = hosted_registry_insecure - docker['service_name'] = 'docker' - defaults['docker'] = docker - if 'cloudprovider' in roles: defaults['cloudprovider'] = dict(kind=None) - if 'hosted' in roles or self.role == 'hosted': - defaults['hosted'] = dict( - etcd=dict( - storage=dict( - kind=None, - volume=dict( - name='etcd', - size='1Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), - registry=dict( - storage=dict( - kind=None, - volume=dict( - name='registry', - size='5Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)'), - glusterfs=dict( - endpoints='glusterfs-registry-endpoints', - path='glusterfs-registry-volume', - ips=[], - readOnly=False, - swap=False, - swapcopy=True), - host=None, - access=dict( - modes=['ReadWriteMany'] - ), - create_pv=True, - create_pvc=True - ) - ), - router=dict() - ) - - defaults['logging'] = dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - 
defaults['loggingops'] = dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es-ops', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['metrics'] = dict( - deploy=False, - duration=7, - resolution='10s', - storage=dict( - kind=None, - volume=dict( - name='metrics', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['prometheus'] = dict( - storage=dict( - kind=None, - volume=dict( - name='prometheus', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['prometheus']['alertmanager'] = dict( - storage=dict( - kind=None, - volume=dict( - name='prometheus-alertmanager', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['prometheus']['alertbuffer'] = dict( - storage=dict( - kind=None, - volume=dict( - name='prometheus-alertbuffer', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - return defaults def guess_host_provider(self): @@ -2226,12 +1842,6 @@ class OpenShiftFacts(object): additive_facts_to_overwrite, protected_facts_to_overwrite) - if 'docker' in new_local_facts: - # Convert legacy log_options comma sep string to a list if present: - if 'log_options' in new_local_facts['docker'] and \ - isinstance(new_local_facts['docker']['log_options'], string_types): - new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',') - new_local_facts = self.remove_empty_facts(new_local_facts) if new_local_facts != local_facts: diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index e70c0c420..b6501d288 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -27,6 +27,9 @@ openshift_cluster_domain: 'cluster.local' r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" +openshift_hosted_router_namespace: 'default' + openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" openshift_hosted_router_edits: @@ -40,13 +43,14 @@ openshift_hosted_router_edits: value: 21600 action: put +openshift_hosted_router_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" openshift_hosted_routers: - name: router replicas: "{{ replicas | default(1) }}" namespace: default serviceaccount: router selector: "{{ openshift_hosted_router_selector | default(None) }}" - images: "{{ openshift_hosted_router_image | default(None) }}" + images: "{{ openshift_hosted_router_registryurl }}" edits: "{{ openshift_hosted_router_edits }}" stats_port: 1936 ports: @@ -64,6 +68,11 @@ r_openshift_hosted_router_os_firewall_allow: [] # Registry # ############ 
+openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" +openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" +openshift_hosted_registry_routecertificates: {} +openshift_hosted_registry_routetermination: "passthrough" + r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py index 7f41529ac..003ce5f9e 100644 --- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py +++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py @@ -12,7 +12,7 @@ class FilterModule(object): def get_router_replicas(replicas=None, router_nodes=None): ''' This function will return the number of replicas based on the results from the defined - openshift.hosted.router.replicas OR + openshift_hosted_router_replicas OR the query from oc_obj on openshift nodes with a selector OR default to 1 diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml index 9f2ef4e40..4797fb788 100644 --- a/roles/openshift_hosted/tasks/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -13,13 +13,13 @@ l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}" l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}" -- when: openshift.hosted.registry.replicas | default(none) is none +- when: openshift_hosted_registry_replicas | default(none) is none block: - name: Retrieve list of openshift nodes matching registry selector oc_obj: state: list kind: node - selector: "{{ openshift.hosted.registry.selector | default(omit) }}" + selector: "{{ openshift_hosted_registry_selector }}" register: registry_nodes - name: set_fact l_node_count to number of nodes matching registry selector @@ -39,16 +39,13 @@ # just 1: - name: set_fact l_default_replicas when l_node_count > 0 set_fact: - l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}" + l_default_replicas: "{{ l_node_count if openshift_hosted_registry_storage_kind | default(none) is not none else 1 }}" when: l_node_count | int > 0 - name: set openshift_hosted facts set_fact: - openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}" - openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" - openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}" - openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}" - openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift.hosted.registry.storage.glusterfs.ips }}{% endif %}" + # This determines the gluster_ips to use for the registry
by looping over the glusterfs_registry group + openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift_hosted_registry_storage_glusterfs_ips }}{% endif %}" - name: Update registry environment variables when pushing via dns set_fact: @@ -101,12 +98,12 @@ static: no run_once: true when: - - not (openshift.docker.hosted_registry_insecure | default(false) | bool) + - not (openshift_docker_hosted_registry_insecure | default(False)) | bool - include: storage/object_storage.yml static: no when: - - openshift.hosted.registry.storage.kind | default(none) == 'object' + - openshift_hosted_registry_storage_kind | default(none) == 'object' - name: Update openshift_hosted facts for persistent volumes set_fact: @@ -115,23 +112,23 @@ pvc_volume_mounts: - name: registry-storage type: persistentVolumeClaim - claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim" + claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-claim" when: - - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs'] + - openshift_hosted_registry_storage_kind | default(none) in ['nfs', 'openstack', 'glusterfs'] - include: storage/glusterfs_endpoints.yml when: - openshift_hosted_registry_storage_glusterfs_ips|length > 0 - - openshift.hosted.registry.storage.kind | default(none) in ['glusterfs'] + - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs'] - name: Create OpenShift registry oc_adm_registry: name: "{{ openshift_hosted_registry_name }}" namespace: "{{ openshift_hosted_registry_namespace }}" selector: "{{ openshift_hosted_registry_selector }}" - replicas: "{{ openshift_hosted_registry_replicas }}" + replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}" service_account: "{{ openshift_hosted_registry_serviceaccount }}" - images: "{{ openshift_hosted_registry_images }}" + images: "{{ openshift_hosted_registry_registryurl }}" env_vars: "{{ openshift_hosted_registry_env_vars }}" volume_mounts: "{{ openshift_hosted_registry_volumes }}" edits: "{{ openshift_hosted_registry_edits }}" @@ -151,7 +148,7 @@ - include: storage/glusterfs.yml when: - - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap + - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap - name: Delete temp directory file: diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index dd7053656..57c10b637 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -11,16 +11,14 @@ oc_obj: state: list kind: node - namespace: "{{ openshift.hosted.router.namespace | default('default') }}" - selector: "{{ openshift.hosted.router.selector | default(omit) }}" + namespace: "{{ openshift_hosted_router_namespace }}" + selector: "{{ openshift_hosted_router_selector }}" register: router_nodes - when: openshift.hosted.router.replicas | default(none) is none + when:
openshift_hosted_router_replicas | default(none) is none - name: set_fact replicas set_fact: - replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}" - openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}" - openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}" + replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}" - name: Get the certificate contents for router copy: @@ -42,8 +40,8 @@ signer_key: "{{ openshift_master_config_dir }}/ca.key" signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" hostnames: - - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}" - - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}" + - "{{ openshift_master_default_subdomain }}" + - "*.{{ openshift_master_default_subdomain }}" cert: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" key: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" with_items: "{{ openshift_hosted_routers }}" diff --git a/roles/openshift_hosted/tasks/secure.yml b/roles/openshift_hosted/tasks/secure.yml index 174bc39a4..ecbf5b141 100644 --- a/roles/openshift_hosted/tasks/secure.yml +++ b/roles/openshift_hosted/tasks/secure.yml @@ -1,10 +1,4 @@ --- -- name: Configure facts for docker-registry - set_fact: - openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}" - openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}" - openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}" - - name: Include reencrypt route configuration include: secure/reencrypt.yml static: no @@ -39,7 +33,7 @@ - "{{ docker_registry_route.results[0].spec.host }}" - "{{ openshift_hosted_registry_name }}.default.svc" - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}" - - "{{ openshift_hosted_registry_routehost }}" + - "{{ openshift_hosted_registry_routehost | default(omit) }}" cert: "{{ docker_registry_cert_path }}" key: "{{ docker_registry_key_path }}" expire_days: "{{ openshift_hosted_registry_cert_expire_days }}" diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index 7cae67baa..18b2edcc6 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml @@ -17,7 +17,7 @@ until: - "registry_pods.results.results[0]['items'] | count > 0" # There must be as many matching pods with 'Ready' status True as there are expected replicas - - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int" + - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int" delay: 10 retries: "{{ (600 / 10) | int }}" @@ -35,7 +35,7 @@ mount: state: mounted fstype: glusterfs - src: "{% if 
'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}" + src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}" name: "{{ mktemp.stdout }}" - name: Set registry volume permissions @@ -60,7 +60,7 @@ - name: Copy current registry contents to new GlusterFS volume command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/" - when: openshift.hosted.registry.storage.glusterfs.swapcopy + when: openshift_hosted_registry_storage_glusterfs_swapcopy - name: Swap new GlusterFS registry volume oc_volume: @@ -68,7 +68,7 @@ name: "{{ openshift_hosted_registry_name }}" vol_name: registry-storage mount_type: pvc - claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim" - name: Deactivate registry maintenance mode oc_env: @@ -77,7 +77,7 @@ state: absent env_vars: - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true' - when: openshift.hosted.registry.storage.glusterfs.swap + when: openshift_hosted_registry_storage_glusterfs_swap - name: Unmount registry volume and clean up mount point/fstab mount: diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml index 0f4381748..bd7181c17 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml @@ -10,7 +10,7 @@ dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - name: Create GlusterFS registry service and endpoint - command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift.hosted.registry.namespace | default('default') }}" + command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}" with_items: - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml" diff --git a/roles/openshift_hosted/tasks/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml index 8553a8098..3d1b2c68e 100644 --- a/roles/openshift_hosted/tasks/storage/object_storage.yml +++ b/roles/openshift_hosted/tasks/storage/object_storage.yml @@ -1,6 +1,6 @@ --- - include: s3.yml - when: openshift.hosted.registry.storage.provider == 's3' + when: openshift_hosted_registry_storage_provider == 's3' - name: Ensure the registry 
secret exists oc_secret: diff --git a/roles/openshift_hosted/tasks/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml index 8e905d905..4c100ee4e 100644 --- a/roles/openshift_hosted/tasks/storage/s3.yml +++ b/roles/openshift_hosted/tasks/storage/s3.yml @@ -2,8 +2,8 @@ - name: Assert that S3 variables are provided for registry_config template assert: that: - - openshift.hosted.registry.storage.s3.bucket | default(none) is not none - - openshift.hosted.registry.storage.s3.bucket | default(none) is not none + - openshift_hosted_registry_storage_s3_bucket | default(none) is not none + - openshift_hosted_registry_storage_s3_region | default(none) is not none msg: | When using S3 storage, the following variables are required: openshift_hosted_registry_storage_s3_bucket diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 index 607d25533..3c874d910 100644 --- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Endpoints metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} subsets: - addresses: {% for ip in openshift_hosted_registry_storage_glusterfs_ips %} diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 index 452c7c3e1..f18c94a4f 100644 --- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 +++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} spec: ports: - port: 1 diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 index 607d25533..3c874d910 100644 --- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Endpoints metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} subsets: - addresses: {% for ip in openshift_hosted_registry_storage_glusterfs_ips %} diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 index 452c7c3e1..f18c94a4f 100644 --- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 +++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} spec: ports: - port: 1 diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 8fc70cecb..ed97d539c 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -1,19 +1 @@ --- -# openshift_*_selector variables have been deprecated in favor of -# openshift_hosted_*_selector variables. 
-- set_fact: - openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" - when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined -- set_fact: - openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" - when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined - -- name: Set hosted facts - openshift_facts: - role: "{{ item }}" - openshift_env: "{{ hostvars - | oo_merge_hostvars(vars, inventory_hostname) - | oo_openshift_env }}" - openshift_env_structures: - - 'openshift.hosted.router.*' - with_items: [hosted, logging, loggingops, metrics, prometheus] diff --git a/roles/openshift_hosted_metrics/README.md b/roles/openshift_hosted_metrics/README.md deleted file mode 100644 index c2af3c494..000000000 --- a/roles/openshift_hosted_metrics/README.md +++ /dev/null @@ -1,54 +0,0 @@ -OpenShift Metrics with Hawkular -==================== - -OpenShift Metrics Installation - -Requirements ------------- - -* Ansible 2.2 -* It requires subdomain fqdn to be set. -* If persistence is enabled, then it also requires NFS. - -Role Variables -------------- - -From this role: - -| Name | Default value | | |-------------------------------------------------|-----------------------|-------------------------------------------------------------| | openshift_hosted_metrics_deploy | `False` | If metrics should be deployed | | openshift_hosted_metrics_public_url | null | Hawkular metrics public url | | openshift_hosted_metrics_storage_nfs_directory | `/exports` | Root export directory. | | openshift_hosted_metrics_storage_volume_name | `metrics` | Metrics volume within openshift_hosted_metrics_volume_dir | | openshift_hosted_metrics_storage_volume_size | `10Gi` | Metrics volume size | | openshift_hosted_metrics_storage_nfs_options | `*(rw,root_squash)` | NFS options for configured exports. | | openshift_hosted_metrics_duration | `7` | Metrics query duration | | openshift_hosted_metrics_resolution | `10s` | Metrics resolution | - - -Dependencies ------------ -openshift_facts -openshift_examples -openshift_master_facts - -Example Playbook ---------------- - -``` -- name: Configure openshift-metrics - hosts: oo_first_master - roles: - - role: openshift_hosted_metrics -``` - -License ------- - -Apache License, Version 2.0 - -Author Information ------------------ - -Jose David Martín (j.david.nieto@gmail.com) diff --git a/roles/openshift_hosted_metrics/defaults/main.yml b/roles/openshift_hosted_metrics/defaults/main.yml deleted file mode 100644 index a01f24df8..000000000 --- a/roles/openshift_hosted_metrics/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml deleted file mode 100644 index 074b72942..000000000 --- a/roles/openshift_hosted_metrics/handlers/main.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - notify: Verify API Server - -# We retry the controllers because the API may not be 100% initialized yet. 
-- name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" - retries: 3 - delay: 5 - register: result - until: result.rc == 0 - when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. - command: > - curl --silent --tlsv1.2 - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false diff --git a/roles/openshift_hosted_metrics/meta/main.yaml b/roles/openshift_hosted_metrics/meta/main.yaml deleted file mode 100644 index debca3ca6..000000000 --- a/roles/openshift_hosted_metrics/meta/main.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -galaxy_info: - author: David Martín - description: - company: - license: Apache License, Version 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- { role: openshift_examples } -- { role: openshift_facts } -- { role: openshift_master_facts } diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml deleted file mode 100644 index 15dd1bd54..000000000 --- a/roles/openshift_hosted_metrics/tasks/install.yml +++ /dev/null @@ -1,132 +0,0 @@ ---- - -- name: Test if metrics-deployer service account exists - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace=openshift-infra - get serviceaccount metrics-deployer -o json - register: serviceaccount - changed_when: false - failed_when: false - -- name: Create metrics-deployer Service Account - shell: > - echo {{ metrics_deployer_sa | to_json | quote }} | - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - create -f - - when: serviceaccount.rc == 1 - -- name: Test edit permissions - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}' - register: edit_rolebindings - changed_when: false - -- name: Add edit permission to the openshift-infra project to metrics-deployer SA - command: > - {{ openshift.common.client_binary }} adm - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - policy add-role-to-user edit - system:serviceaccount:openshift-infra:metrics-deployer - when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout" - -- name: Test hawkular view permissions - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}' - register: view_rolebindings - changed_when: false - -- name: Add view permissions to hawkular SA - command: > - {{ openshift.common.client_binary }} adm - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - policy add-role-to-user view - 
system:serviceaccount:openshift-infra:hawkular - when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings" - -- name: Test cluster-reader permissions - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}' - register: cluster_reader_clusterrolebindings - changed_when: false - -- name: Add cluster-reader permission to the openshift-infra project to heapster SA - command: > - {{ openshift.common.client_binary }} adm - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - policy add-cluster-role-to-user cluster-reader - system:serviceaccount:openshift-infra:heapster - when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout" - -- name: Create metrics-deployer secret - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - secrets new metrics-deployer nothing=/dev/null - register: metrics_deployer_secret - changed_when: metrics_deployer_secret.rc == 0 - failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr - -# TODO: extend this to allow user passed in certs or generating cert with -# OpenShift CA -- name: Build metrics deployer command - set_fact: - deployer_cmd: "{{ openshift.common.client_binary }} process -f \ - {{ hosted_base }}/metrics-deployer.yaml -v \ - HAWKULAR_METRICS_HOSTNAME={{ g_metrics_hostname }} \ - -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \ - -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \ - -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \ - -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} - {{ image_prefix }} \ - {{ image_version }} \ - -v MODE={{ deployment_mode }} \ - | {{ openshift.common.client_binary }} --namespace openshift-infra \ - --config={{ openshift_hosted_metrics_kubeconfig }} \ - create -o name -f -" - -- name: Deploy Metrics - shell: "{{ deployer_cmd }}" - register: deploy_metrics - failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0" - changed_when: deploy_metrics.rc == 0 - -- set_fact: - deployer_pod: "{{ deploy_metrics.stdout[1:2] }}" - -# TODO: re-enable this once the metrics deployer validation issue is fixed -# when using dynamically provisioned volumes -- name: "Wait for image pull and deployer pod" - shell: > - {{ openshift.common.client_binary }} - --namespace openshift-infra - --config={{ openshift_hosted_metrics_kubeconfig }} - get {{ deploy_metrics.stdout }} - register: deploy_result - until: "{{ 'Completed' in deploy_result.stdout }}" - failed_when: False - retries: 60 - delay: 10 - -- name: Configure master for metrics - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: assetConfig.metricsPublicURL - yaml_value: "{{ openshift_hosted_metrics_deploy_url }}" - notify: restart master diff --git a/roles/openshift_hosted_metrics/tasks/main.yaml b/roles/openshift_hosted_metrics/tasks/main.yaml deleted file mode 100644 index 5ce8aa92b..000000000 --- a/roles/openshift_hosted_metrics/tasks/main.yaml +++ /dev/null @@ -1,75 +0,0 @@ ---- -- name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp - changed_when: False - -- name: Record 
kubeconfig tmp dir - set_fact: - openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" - -- name: Copy the admin client config(s) - command: > - cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }} - changed_when: False - -- name: Set hosted metrics facts - openshift_facts: - role: hosted - openshift_env: "{{ hostvars - | oo_merge_hostvars(vars, inventory_hostname) - | oo_openshift_env }}" - openshift_env_structures: - - 'openshift.hosted.metrics.*' - -- set_fact: - metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}" - metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}" - metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}" - image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}" - image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}" - - -- name: Check for existing metrics pods - shell: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get pods -l {{ item }} | grep -q Running - register: metrics_pods_status - with_items: - - metrics-infra=hawkular-metrics - - metrics-infra=heapster - - metrics-infra=hawkular-cassandra - failed_when: false - changed_when: false - -- name: Check for previous deployer - shell: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer- - register: metrics_deployer_status - failed_when: false - changed_when: false - -- name: Record current deployment status - set_fact: - greenfield: "{{ not metrics_deployer_status.rc == 0 }}" - failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}" - metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}" - -- name: Set deployment mode - set_fact: - deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}" - -# TODO: handle non greenfield deployments in the future -- include: install.yml - when: greenfield - -- name: Delete temp directory - file: - name: "{{ mktemp.stdout }}" - state: absent - changed_when: False diff --git a/roles/openshift_hosted_metrics/vars/main.yaml b/roles/openshift_hosted_metrics/vars/main.yaml deleted file mode 100644 index 6c207d6ac..000000000 --- a/roles/openshift_hosted_metrics/vars/main.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -hawkular_permission_oc_commands: - - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra - - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster - -metrics_deployer_sa: - apiVersion: v1 - kind: ServiceAccount - metadata: - name: metrics-deployer - secrets: - - name: metrics-deployer - - -hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig - -hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}" - -hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}" - 
-metrics_upgrade: openshift.hosted.metrics.upgrade | default(False) diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index 239b16427..f9c16ba40 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -26,6 +26,8 @@ r_openshift_loadbalancer_os_firewall_allow: port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp" cond: "{{ r_openshift_lb_use_nuage | bool }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" + # NOTE # r_openshift_lb_use_nuage_default may be defined external to this role. # openshift_use_nuage, if defined, may affect other roles or play behavior. diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 index 72182fcdd..57121447d 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 @@ -1,7 +1,7 @@ [Unit] -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service [Service] ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer @@ -14,4 +14,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 497c6e0c5..2f1aa061f 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -28,7 +28,7 @@ openshift_logging_curator_ops_memory_limit: 256Mi openshift_logging_curator_ops_cpu_request: 100m openshift_logging_curator_ops_nodeselector: {} -openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_kibana_hostname: "{{ 'kibana.' ~ openshift_master_default_subdomain }}" openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_memory_limit: 736Mi openshift_logging_kibana_cpu_request: 100m @@ -54,7 +54,7 @@ openshift_logging_kibana_key: "" #for the public facing kibana certs openshift_logging_kibana_ca: "" -openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ openshift_master_default_subdomain }}" openshift_logging_kibana_ops_cpu_limit: null openshift_logging_kibana_ops_memory_limit: 736Mi openshift_logging_kibana_ops_cpu_request: 100m @@ -109,7 +109,7 @@ openshift_logging_es_config: {} # for exposing es to external (outside of the cluster) clients openshift_logging_es_allow_external: False -openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_es_hostname: "{{ 'es.' 
~ openshift_master_default_subdomain }}" #The absolute path on the control node to the cert file to use #for the public facing es certs @@ -145,7 +145,7 @@ openshift_logging_es_ops_nodeselector: {} # for exposing es-ops to external (outside of the cluster) clients openshift_logging_es_ops_allow_external: False -openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ openshift_master_default_subdomain }}" #The absolute path on the control node to the cert file to use #for the public facing es-ops certs @@ -165,7 +165,7 @@ openshift_logging_storage_access_modes: ['ReadWriteOnce'] # mux - secure_forward listener service openshift_logging_mux_allow_external: False openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" -openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}" openshift_logging_mux_port: 24284 openshift_logging_mux_cpu_limit: null openshift_logging_mux_memory_limit: 512Mi diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml index 074b72942..e0329ee7c 100644 --- a/roles/openshift_logging/handlers/main.yml +++ b/roles/openshift_logging/handlers/main.yml @@ -1,7 +1,7 @@ --- - name: restart master api systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (not (master_api_service_status_changed | default(false) | bool)) notify: Verify API Server # We retry the controllers because the API may not be 100% initialized yet. 
@@ -11,7 +11,7 @@ delay: 5 register: result until: result.rc == 0 - when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (not (master_controllers_service_status_changed | default(false) | bool)) - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml index 6752fb7f9..d4635aab0 100644 --- a/roles/openshift_logging_curator/meta/main.yaml +++ b/roles/openshift_logging_curator/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml index 097270772..6a9a6539c 100644 --- a/roles/openshift_logging_elasticsearch/meta/main.yaml +++ b/roles/openshift_logging_elasticsearch/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml index 2003aacb2..89c98204f 100644 --- a/roles/openshift_logging_fluentd/meta/main.yaml +++ b/roles/openshift_logging_fluentd/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml index 6cdf7c8f3..007089e00 100644 --- a/roles/openshift_logging_kibana/defaults/main.yml +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -10,7 +10,7 @@ openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_cpu_request: 100m openshift_logging_kibana_memory_limit: 736Mi -openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}" openshift_logging_kibana_es_host: "logging-es" openshift_logging_kibana_es_port: 9200 diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml index 89e08abc0..d97586a37 100644 --- a/roles/openshift_logging_kibana/meta/main.yaml +++ b/roles/openshift_logging_kibana/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index cd15da939..1e6c501bf 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -28,7 +28,7 @@ openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journ openshift_logging_mux_allow_external: False openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" -openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_mux_hostname: "{{ 'mux.' 
~ openshift_master_default_subdomain }}" openshift_logging_mux_port: 24284 # the namespace to use for undefined projects should come first, followed by any # additional namespaces to create by default - users will typically not need to set this diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml index f40beb79d..f271d8d7d 100644 --- a/roles/openshift_logging_mux/meta/main.yaml +++ b/roles/openshift_logging_mux/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 8e4a46ebb..38b2fd8b8 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -52,6 +52,8 @@ openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | containerized_svc_dir: "/usr/lib/systemd/system" ha_svc_template_path: "native-cluster" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" + # NOTE # r_openshift_master_*_default may be defined external to this role. # openshift_use_*, if defined, may affect other roles or play behavior. diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 359536202..e6b8b8ac8 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -5,7 +5,6 @@ state: restarted when: - not (master_api_service_status_changed | default(false) | bool) - - openshift.master.cluster_method == 'native' notify: - Verify API Server @@ -18,7 +17,6 @@ until: result.rc == 0 when: - not (master_controllers_service_status_changed | default(false) | bool) - - openshift.master.cluster_method == 'native' - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index a1cda2ad4..bf0cbbf18 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -15,3 +15,4 @@ dependencies: - role: lib_openshift - role: lib_utils - role: lib_os_firewall +- role: openshift_facts diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index e52cd6231..5f4e6df71 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -11,25 +11,6 @@ - openshift_master_oauth_grant_method is defined - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods -# HA Variable Validation -- fail: - msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations" - when: - - openshift.master.ha | bool - - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"]) -- fail: - msg: "openshift_master_cluster_password must be set for multi-master installations" - when: - - openshift.master.ha | bool - - openshift.master.cluster_method == "pacemaker" - - openshift_master_cluster_password is not defined or not openshift_master_cluster_password -- fail: - msg: "Pacemaker based HA is not supported at this time when used with containerized installs" - when: - - openshift.master.ha | bool - - openshift.master.cluster_method == "pacemaker" - - openshift.common.is_containerized | bool - - name: Open up firewall ports 
import_tasks: firewall.yml @@ -226,7 +207,6 @@ enabled: yes state: started when: - - openshift.master.cluster_method == 'native' - inventory_hostname == openshift_master_hosts[0] register: l_start_result until: not l_start_result | failed @@ -241,14 +221,12 @@ - set_fact: master_api_service_status_changed: "{{ l_start_result | changed }}" when: - - openshift.master.cluster_method == 'native' - inventory_hostname == openshift_master_hosts[0] - pause: seconds: 15 when: - openshift.master.ha | bool - - openshift.master.cluster_method == 'native' - name: Start and enable master api all masters systemd: @@ -256,7 +234,6 @@ enabled: yes state: started when: - - openshift.master.cluster_method == 'native' - inventory_hostname != openshift_master_hosts[0] register: l_start_result until: not l_start_result | failed @@ -271,14 +248,12 @@ - set_fact: master_api_service_status_changed: "{{ l_start_result | changed }}" when: - - openshift.master.cluster_method == 'native' - inventory_hostname != openshift_master_hosts[0] # A separate wait is required here for native HA since notifies will # be resolved after all tasks in the role. - include_tasks: check_master_api_is_ready.yml when: - - openshift.master.cluster_method == 'native' - master_api_service_status_changed | bool - name: Start and enable master controller service @@ -286,8 +261,6 @@ name: "{{ openshift.common.service_type }}-master-controllers" enabled: yes state: started - when: - - openshift.master.cluster_method == 'native' register: l_start_result until: not l_start_result | failed retries: 1 @@ -301,30 +274,6 @@ - name: Set fact master_controllers_service_status_changed set_fact: master_controllers_service_status_changed: "{{ l_start_result | changed }}" - when: - - openshift.master.cluster_method == 'native' - -- name: Install cluster packages - package: name=pcs state=present - when: - - openshift.master.cluster_method == 'pacemaker' - - not openshift.common.is_containerized | bool - register: l_install_result - until: l_install_result | success - -- name: Start and enable cluster service - systemd: - name: pcsd - enabled: yes - state: started - when: - - openshift.master.cluster_method == 'pacemaker' - - not openshift.common.is_containerized | bool - -- name: Set the cluster user password - shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster - when: - - l_install_result | changed - name: node bootstrap settings include_tasks: bootstrap.yml diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml index c95f562d0..ca04d2243 100644 --- a/roles/openshift_master/tasks/registry_auth.yml +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -33,7 +33,7 @@ - openshift_docker_alternative_creds | default(False) | bool - oreg_auth_user is defined - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - register: master_oreg_auth_credentials_create + register: master_oreg_auth_credentials_create_alt notify: - restart master api - restart master controllers @@ -45,4 +45,8 @@ when: - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool + - > + (master_oreg_auth_credentials_stat.stat.exists + or oreg_auth_credentials_replace + or master_oreg_auth_credentials_create.changed + or master_oreg_auth_credentials_create_alt.changed) | bool diff --git 
a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 9d11ed574..ee76413e3 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -25,7 +25,6 @@ state: absent ignore_errors: true when: - - openshift.master.cluster_method == "native" - not l_is_master_system_container | bool # This is the image used for both HA and non-HA clusters: @@ -43,7 +42,6 @@ src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2" dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service" when: - - openshift.master.cluster_method == "native" - not l_is_master_system_container | bool with_items: - api @@ -63,22 +61,17 @@ - api - controllers when: - - openshift.master.cluster_method == "native" - not l_is_master_system_container | bool - name: Preserve Master API Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api register: l_master_api_proxy - when: - - openshift.master.cluster_method == "native" failed_when: false changed_when: false - name: Preserve Master API AWS options command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api register: master_api_aws - when: - - openshift.master.cluster_method == "native" failed_when: false changed_when: false @@ -87,14 +80,11 @@ src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2" dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api backup: true - when: - - openshift.master.cluster_method == "native" notify: - restart master api - name: Restore Master API Proxy Config Options when: - - openshift.master.cluster_method == "native" - l_master_api_proxy.rc == 0 - "'http_proxy' not in openshift.common" - "'https_proxy' not in openshift.common" @@ -105,7 +95,6 @@ - name: Restore Master API AWS Options when: - - openshift.master.cluster_method == "native" - master_api_aws.rc == 0 - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) lineinfile: @@ -117,16 +106,12 @@ - name: Preserve Master Controllers Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers register: master_controllers_proxy - when: - - openshift.master.cluster_method == "native" failed_when: false changed_when: false - name: Preserve Master Controllers AWS options command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers register: master_controllers_aws - when: - - openshift.master.cluster_method == "native" failed_when: false changed_when: false @@ -135,8 +120,6 @@ src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2" dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers backup: true - when: - - openshift.master.cluster_method == "native" notify: - restart master controllers @@ -146,7 +129,6 @@ line: "{{ item }}" with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}" when: - - openshift.master.cluster_method == "native" - master_controllers_proxy.rc == 0 - "'http_proxy' not in openshift.common" - "'https_proxy' not in openshift.common" @@ -157,6 +139,5 @@ line: "{{ item }}" with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}" when: - - openshift.master.cluster_method == "native" - master_controllers_aws.rc == 0 - not (openshift_cloudprovider_kind is 
defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index 5d4a99c97..cec3d3fb1 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -4,9 +4,9 @@ Documentation=https://github.com/openshift/origin After=etcd_container.service Wants=etcd_container.service Before={{ openshift.common.service_type }}-node.service -After={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api @@ -33,5 +33,5 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index f93f3b565..a0248151d 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -3,9 +3,9 @@ Description=Atomic OpenShift Master Controllers Documentation=https://github.com/openshift/origin Wants={{ openshift.common.service_type }}-master-api.service After={{ openshift.common.service_type }}-master-api.service -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers @@ -32,4 +32,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index a0f00e545..92668b227 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -120,7 +120,7 @@ kubernetesMasterConfig: - application/vnd.kubernetes.protobuf {% endif %} controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }} - masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }} + masterCount: {{ openshift.master.master_count }} masterIP: {{ openshift.common.ip }} podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }} proxyClientInfo: @@ -204,7 +204,7 @@ projectConfig: mcsLabelsPerProject: {{ osm_mcs_labels_per_project }} uidAllocatorRange: "{{ osm_uid_allocator_range }}" routingConfig: - 
subdomain: "{{ openshift_master_default_subdomain | default("") }}" + subdomain: "{{ openshift_master_default_subdomain }}" serviceAccountConfig: limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }} managedNames: diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md deleted file mode 100644 index 58dd19ac3..000000000 --- a/roles/openshift_master_cluster/README.md +++ /dev/null @@ -1,34 +0,0 @@ -OpenShift Master Cluster -======================== - -TODO - -Requirements ------------- - -* Ansible 2.2 - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml deleted file mode 100644 index c452b165e..000000000 --- a/roles/openshift_master_cluster/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: [] diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml deleted file mode 100644 index 1b94598dd..000000000 --- a/roles/openshift_master_cluster/tasks/configure.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- fail: - msg: This role requires that openshift_master_cluster_vip is set - when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip -- fail: - msg: This role requires that openshift_master_cluster_public_vip is set - when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip - -- name: Authenticate to the cluster - command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }} - -- name: Create the cluster - command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }} - -- name: Start the cluster - command: pcs cluster start --all - -- name: Enable the cluster on all nodes - command: pcs cluster enable --all - -- name: Set default resource stickiness - command: pcs resource defaults resource-stickiness=100 - -- name: Add the cluster VIP resource - command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master - -- name: Add the cluster public VIP resource - command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master - when: openshift_master_cluster_public_vip != openshift_master_cluster_vip - -- name: Add the cluster master service resource - command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master - -- name: Disable stonith - command: pcs property set stonith-enabled=false - -- name: Wait for the clustered master service to be available - wait_for: - host: "{{ openshift_master_cluster_vip }}" - port: "{{ openshift.master.api_port }}" - state: started - timeout: 180 - delay: 90 diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml deleted file mode 100644 index 
41bfc72cb..000000000 --- a/roles/openshift_master_cluster/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- fail: - msg: "Not possible on atomic hosts for now" - when: openshift.common.is_containerized | bool - -- name: Test if cluster is already configured - command: pcs status - register: pcs_status - changed_when: false - failed_when: false - when: openshift.master.cluster_method == "pacemaker" - -- include_tasks: configure.yml - when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr" diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml index d0dcdae4b..a89f48afa 100644 --- a/roles/openshift_master_facts/defaults/main.yml +++ b/roles/openshift_master_facts/defaults/main.yml @@ -1,5 +1,4 @@ --- -openshift_master_default_subdomain: "router.default.svc.cluster.local" openshift_master_admission_plugin_config: openshift.io/ImagePolicy: configuration: diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py index c827f2d26..ff15f693b 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py @@ -485,31 +485,6 @@ class FilterModule(object): Dumper=AnsibleDumper)) @staticmethod - def validate_pcs_cluster(data, masters=None): - ''' Validates output from "pcs status", ensuring that each master - provided is online. - Ex: data = ('...', - 'PCSD Status:', - 'master1.example.com: Online', - 'master2.example.com: Online', - 'master3.example.com: Online', - '...') - masters = ['master1.example.com', - 'master2.example.com', - 'master3.example.com'] - returns True - ''' - if not issubclass(type(data), string_types): - raise errors.AnsibleFilterError("|failed expects data is a string or unicode") - if not issubclass(type(masters), list): - raise errors.AnsibleFilterError("|failed expects masters is a list") - valid = True - for master in masters: - if "{0}: Online".format(master) not in data: - valid = False - return valid - - @staticmethod def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True): ''' Return certificates to synchronize based on facts. 
''' if not issubclass(type(hostvars), dict): @@ -553,6 +528,5 @@ class FilterModule(object): def filters(self): ''' returns a mapping of filters to methods ''' return {"translate_idps": self.translate_idps, - "validate_pcs_cluster": self.validate_pcs_cluster, "certificates_to_synchronize": self.certificates_to_synchronize, "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file} diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 20cc5358e..0cb87dcaa 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -1,14 +1,8 @@ --- -# Ensure the default sub-domain is set: -- name: Migrate legacy osm_default_subdomain fact - set_fact: - openshift_master_default_subdomain: "{{ osm_default_subdomain | default(None) }}" - when: openshift_master_default_subdomain is not defined - - name: Verify required variables are set fail: msg: openshift_master_default_subdomain must be set to deploy metrics - when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain | default("") == "" + when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain == "" # NOTE: These metrics variables are unfortunately needed by both the master and the metrics roles # to properly configure the master-config.yaml file. @@ -20,7 +14,7 @@ - name: Set g_metrics_hostname set_fact: g_metrics_hostname: "{{ openshift_hosted_metrics_public_url - | default('hawkular-metrics.' ~ (openshift_master_default_subdomain)) + | default('hawkular-metrics.' ~ openshift_master_default_subdomain) | oo_hostname_from_url }}" - set_fact: @@ -31,7 +25,6 @@ openshift_facts: role: master local_facts: - cluster_method: "{{ openshift_master_cluster_method | default('native') }}" cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" api_port: "{{ openshift_master_api_port | default(None) }}" diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml index 074b72942..e0329ee7c 100644 --- a/roles/openshift_metrics/handlers/main.yml +++ b/roles/openshift_metrics/handlers/main.yml @@ -1,7 +1,7 @@ --- - name: restart master api systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (not (master_api_service_status_changed | default(false) | bool)) notify: Verify API Server # We retry the controllers because the API may not be 100% initialized yet. @@ -11,7 +11,7 @@ delay: 5 register: result until: result.rc == 0 - when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (not (master_controllers_service_status_changed | default(false) | bool)) - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 5a0c09f5c..f3867fe4a 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -101,8 +101,11 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_ur oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False +openshift_use_crio: False openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" + # NOTE # r_openshift_node_*_default may be defined external to this role. # openshift_use_*, if defined, may affect other roles or play behavior. diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 927d107c6..70057c7f3 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -17,7 +17,6 @@ dependencies: - role: lib_openshift - role: lib_os_firewall when: not (openshift_node_upgrade_in_progress | default(False)) -- role: openshift_docker - role: openshift_cloud_provider when: not (openshift_node_upgrade_in_progress | default(False)) - role: lib_utils diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index d46b1f9c3..e60d96760 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -4,7 +4,7 @@ when: - (not ansible_selinux or ansible_selinux.status != 'enabled') - deployment_type == 'openshift-enterprise' - - not openshift_use_crio | default(false) + - not openshift_use_crio - include: dnsmasq.yml @@ -49,7 +49,7 @@ name: cri-o enabled: yes state: restarted - when: openshift_use_crio | default(false) + when: openshift_use_crio - name: restart NetworkManager to ensure resolv.conf is present systemd: diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index 73dc9e130..eb8d9a6a5 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -16,6 +16,6 @@ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" values: - "DNS_DOMAIN={{ openshift.common.dns_domain }}" - - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service" + - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service" - "MASTER_SERVICE={{ openshift.common.service_type }}.service" state: latest diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index 8c3548475..d33e172c1 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -1,14 +1,11 @@ --- - set_fact: - l_use_crio: "{{ openshift_use_crio | default(false) }}" - -- set_fact: l_service_name: "cri-o" - when: l_use_crio + when: openshift_use_crio - set_fact: - l_service_name: "{{ openshift.docker.service_name }}" - when: not l_use_crio + l_service_name: "{{ openshift_docker_service_name }}" + when: not openshift_use_crio - name: Ensure proxies are in the atomic.conf include_role: diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml index f5428867a..ab43ec049 100644 --- a/roles/openshift_node/tasks/registry_auth.yml +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -32,7 +32,7 @@ - openshift_docker_alternative_creds | bool - oreg_auth_user is defined - (not node_oreg_auth_credentials_stat.stat.exists or 
oreg_auth_credentials_replace) | bool - register: node_oreg_auth_credentials_create + register: node_oreg_auth_credentials_create_alt notify: - restart node @@ -43,4 +43,8 @@ when: - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool + - > + (node_oreg_auth_credentials_stat.stat.exists + or oreg_auth_credentials_replace + or node_oreg_auth_credentials_create.changed + or node_oreg_auth_credentials_create_alt.changed) | bool diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml index a4fa51172..3f1abceab 100644 --- a/roles/openshift_node/tasks/upgrade/restart.yml +++ b/roles/openshift_node/tasks/upgrade/restart.yml @@ -13,19 +13,15 @@ - name: Reload systemd to ensure latest unit files command: systemctl daemon-reload -- name: Restart docker +- name: Restart container runtime service: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: started register: docker_start_result until: not docker_start_result | failed retries: 3 delay: 30 -- name: Update docker facts - openshift_facts: - role: docker - - name: Start services service: name={{ item }} state=started with_items: diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index 7602d8ee6..da751bd65 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -1,14 +1,14 @@ [Unit] Description=OpenShift Node -After={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service Wants=openvswitch.service After=ovsdb-server.service After=ovs-vswitchd.service -Wants={{ openshift.docker.service_name }}.service +Wants={{ openshift_docker_service_name }}.service Documentation=https://github.com/openshift/origin Requires=dnsmasq.service After=dnsmasq.service -{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio %}Wants=cri-o.service{% endif %} [Service] Type=notify diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index d452cc45c..16fdde02e 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -16,7 +16,7 @@ imageConfig: latest: {{ openshift_node_image_config_latest }} kind: NodeConfig kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }} -{% if openshift_use_crio | default(False) %} +{% if openshift_use_crio %} container-runtime: - remote container-runtime-endpoint: diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index fa7238849..5964ac095 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -1,9 +1,9 @@ [Unit] -Requires={{ openshift.docker.service_name }}.service -After={{ openshift.docker.service_name }}.service +Requires={{ openshift_docker_service_name }}.service +After={{ openshift_docker_service_name }}.service PartOf={{ openshift.common.service_type }}-node.service Before={{ openshift.common.service_type }}-node.service -{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio %}Wants=cri-o.service{% endif %} 
[Service] ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 561aa01f4..3b33ca542 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -1,9 +1,9 @@ [Unit] After={{ openshift.common.service_type }}-master.service -After={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service After=openvswitch.service -PartOf={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service +PartOf={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service {% if openshift_node_use_openshift_sdn %} Wants=openvswitch.service PartOf=openvswitch.service @@ -26,7 +26,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \ -e HOST=/rootfs -e HOST_ETC=/host-etc \ - -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \ + -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \ -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \ @@ -48,4 +48,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service index 34aaaabd6..37f091c76 100644 --- a/roles/openshift_node/templates/openvswitch.docker.service +++ b/roles/openshift_node/templates/openvswitch.docker.service @@ -1,7 +1,7 @@ [Unit] -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/openvswitch @@ -14,4 +14,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml index 455f26f30..b42b75be9 100644 --- a/roles/openshift_node_certificates/defaults/main.yml +++ b/roles/openshift_node_certificates/defaults/main.yml @@ -1,3 +1,5 @@ --- openshift_node_cert_expire_days: 730 openshift_ca_host: '' + +openshift_docker_service_name: 
"{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml index ef66bf9ca..0686ac101 100644 --- a/roles/openshift_node_certificates/handlers/main.yml +++ b/roles/openshift_node_certificates/handlers/main.yml @@ -6,7 +6,7 @@ - name: check for container runtime after updating ca trust command: > - systemctl -q is-active {{ openshift.docker.service_name }}.service + systemctl -q is-active {{ openshift_docker_service_name }}.service register: l_docker_installed # An rc of 0 indicates that the container runtime service is # running. We will restart it by notifying the restart handler since @@ -18,7 +18,7 @@ - name: restart container runtime after updating ca trust systemd: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: restarted when: not openshift_certificates_redeploy | default(false) | bool register: l_docker_restart_docker_in_cert_result diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml index d33d09980..c234a3000 100644 --- a/roles/openshift_node_facts/tasks/main.yml +++ b/roles/openshift_node_facts/tasks/main.yml @@ -15,7 +15,6 @@ kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" labels: "{{ openshift_node_labels | default(None) }}" registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" - sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" set_node_ip: "{{ openshift_set_node_ip | default(None) }}" node_image: "{{ osn_image | default(None) }}" diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 5f182e0d6..e72e7a9df 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -4,7 +4,6 @@ openshift_openstack_stack_state: 'present' openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0 openshift_openstack_node_ingress_cidr: 0.0.0.0/0 openshift_openstack_lb_ingress_cidr: 0.0.0.0/0 -openshift_openstack_bastion_ingress_cidr: 0.0.0.0/0 openshift_openstack_num_etcd: 0 openshift_openstack_num_masters: 1 openshift_openstack_num_nodes: 1 @@ -91,6 +90,4 @@ openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size openshift_openstack_etcd_volume_size: 2 openshift_openstack_dns_volume_size: 1 openshift_openstack_lb_volume_size: 5 -openshift_openstack_use_bastion: false -openshift_openstack_ui_ssh_tunnel: false openshift_openstack_ephemeral_volumes: false diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index c03aceb94..b5c4da532 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -63,15 +63,6 @@ when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - openshift_openstack_num_masters == 1 - - not openshift_openstack_use_bastion|bool - -- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" - when: - - 
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
index d33d09980..c234a3000 100644
--- a/roles/openshift_node_facts/tasks/main.yml
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -15,7 +15,6 @@
      kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
      labels: "{{ openshift_node_labels | default(None) }}"
      registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
-      sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
      storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
      set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
      node_image: "{{ osn_image | default(None) }}"
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 5f182e0d6..e72e7a9df 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -4,7 +4,6 @@ openshift_openstack_stack_state: 'present'
openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0
openshift_openstack_node_ingress_cidr: 0.0.0.0/0
openshift_openstack_lb_ingress_cidr: 0.0.0.0/0
-openshift_openstack_bastion_ingress_cidr: 0.0.0.0/0
openshift_openstack_num_etcd: 0
openshift_openstack_num_masters: 1
openshift_openstack_num_nodes: 1
@@ -91,6 +90,4 @@ openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size
openshift_openstack_etcd_volume_size: 2
openshift_openstack_dns_volume_size: 1
openshift_openstack_lb_volume_size: 5
-openshift_openstack_use_bastion: false
-openshift_openstack_ui_ssh_tunnel: false
openshift_openstack_ephemeral_volumes: false
diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml
index c03aceb94..b5c4da532 100644
--- a/roles/openshift_openstack/tasks/populate-dns.yml
+++ b/roles/openshift_openstack/tasks/populate-dns.yml
@@ -63,15 +63,6 @@
  when:
  - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
  - openshift_openstack_num_masters == 1
-  - not openshift_openstack_use_bastion|bool
-
-- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)"
-  set_fact:
-    public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}"
-  when:
-  - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
-  - openshift_openstack_num_masters == 1
-  - openshift_openstack_use_bastion|bool

- name: "Add public master cluster hostname records to the public A records (multi-master)"
  set_fact:
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 0e7538629..ec488941e 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -72,7 +72,7 @@ outputs:
{% endif %}

conditions:
-  no_floating: {% if openshift_openstack_provider_network_name or openshift_openstack_use_bastion|bool %}true{% else %}false{% endif %}
+  no_floating: {% if openshift_openstack_provider_network_name %}true{% else %}false{% endif %}

resources:

@@ -180,13 +180,6 @@
        port_range_min: 22
        port_range_max: 22
        remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
-{% if openshift_openstack_use_bastion|bool %}
-      - direction: ingress
-        protocol: tcp
-        port_range_min: 22
-        port_range_max: 22
-        remote_ip_prefix: {{ openshift_openstack_bastion_ingress_cidr }}
-{% endif %}
      - direction: ingress
        protocol: icmp
        remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
@@ -480,7 +473,7 @@ resources:
        remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24"
{% endif %}

-{% if openshift_openstack_num_masters|int > 1 or openshift_openstack_ui_ssh_tunnel|bool %}
+{% if openshift_openstack_num_masters|int > 1 %}
  lb-secgrp:
    type: OS::Neutron::SecurityGroup
    properties:
@@ -491,20 +484,13 @@
        protocol: tcp
        port_range_min: {{ openshift_master_api_port | default(8443) }}
        port_range_max: {{ openshift_master_api_port | default(8443) }}
-        remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }}
-{% if openshift_openstack_ui_ssh_tunnel|bool %}
-      - direction: ingress
-        protocol: tcp
-        port_range_min: {{ openshift_master_api_port | default(8443) }}
-        port_range_max: {{ openshift_master_api_port | default(8443) }}
-        remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
-{% endif %}
+        remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
{% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %}
      - direction: ingress
        protocol: tcp
        port_range_min: {{ openshift_master_console_port | default(8443) }}
        port_range_max: {{ openshift_master_console_port | default(8443) }}
-        remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }}
+        remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
{% endif %}
{% endif %}

@@ -553,7 +539,7 @@
          - no_floating
          - null
          - {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
      attach_float_net: false
{% endif %}
      volume_size: {{ openshift_openstack_etcd_volume_size }}
@@ -685,7 +671,7 @@
          - no_floating
          - null
          - {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
      attach_float_net: false
{% endif %}
      volume_size: {{ openshift_openstack_master_volume_size }}
@@ -755,7 +741,7 @@
          - no_floating
          - null
          - {{ openshift_openstack_external_network_name }}
-{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %}
+{% if openshift_openstack_provider_network_name %}
      attach_float_net: false
{% endif %}
      volume_size: {{ openshift_openstack_node_volume_size }}
@@ -818,9 +804,6 @@
{% else %}
          - { get_resource: node-secgrp }
{% endif %}
-{% if openshift_openstack_ui_ssh_tunnel|bool and openshift_openstack_num_masters|int < 2 %}
-          - { get_resource: lb-secgrp }
-{% endif %}
          - { get_resource: infra-secgrp }
          - { get_resource: common-secgrp }
{% if not openshift_openstack_provider_network_name %}
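With bastion support removed, the Heat template's no_floating condition reduces to the presence of a provider network. Restated in Python for illustration only:

    def no_floating(provider_network_name):
        # Floating IPs are skipped only when nodes sit directly on a provider network.
        return bool(provider_network_name)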
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
new file mode 100644
index 000000000..8046aff23
--- /dev/null
+++ b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
@@ -0,0 +1,159 @@
+"""
+Ansible action plugin to generate lists of PV and PVC dictionaries.
+"""
+
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+
+class ActionModule(ActionBase):
+    """Action plugin to generate persistent volume and claim definitions."""
+
+    def get_templated(self, var_to_template):
+        """Return a properly templated ansible variable"""
+        return self._templar.template(self.task_vars.get(var_to_template))
+
+    def build_common(self, varname=None):
+        """Retrieve common variables for each pv and pvc type"""
+        volume = self.get_templated(str(varname) + '_volume_name')
+        size = self.get_templated(str(varname) + '_volume_size')
+        labels = self.task_vars.get(str(varname) + '_labels')
+        if labels:
+            labels = self._templar.template(labels)
+        else:
+            labels = dict()
+        access_modes = self.get_templated(str(varname) + '_access_modes')
+        return (volume, size, labels, access_modes)
+
+    def build_pv_nfs(self, varname=None):
+        """Build pv dictionary for nfs storage type"""
+        host = self.task_vars.get(str(varname) + '_host')
+        if host:
+            host = self._templar.template(host)
+        elif host is None:
+            groups = self.task_vars.get('groups')
+            default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group')
+            if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0:
+                host = groups[default_group_name][0]
+            else:
+                raise errors.AnsibleModuleError("|failed no storage host detected")
+        volume, size, labels, access_modes = self.build_common(varname=varname)
+        directory = self.get_templated(str(varname) + '_nfs_directory')
+        path = directory + '/' + volume
+        return dict(
+            name="{0}-volume".format(volume),
+            capacity=size,
+            labels=labels,
+            access_modes=access_modes,
+            storage=dict(
+                nfs=dict(
+                    server=host,
+                    path=path)))
+
+    def build_pv_openstack(self, varname=None):
+        """Build pv dictionary for openstack storage type"""
+        volume, size, labels, access_modes = self.build_common(varname=varname)
+        filesystem = self.get_templated(str(varname) + '_openstack_filesystem')
+        volume_id = self.get_templated(str(varname) + '_openstack_volumeID')
+        return dict(
+            name="{0}-volume".format(volume),
+            capacity=size,
+            labels=labels,
+            access_modes=access_modes,
+            storage=dict(
+                cinder=dict(
+                    fsType=filesystem,
+                    volumeID=volume_id)))
+
+    def build_pv_glusterfs(self, varname=None):
+        """Build pv dictionary for glusterfs storage type"""
+        volume, size, labels, access_modes = self.build_common(varname=varname)
+        endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints')
+        path = self.get_templated(str(varname) + '_glusterfs_path')
+        read_only = self.get_templated(str(varname) + '_glusterfs_readOnly')
+        return dict(
+            name="{0}-volume".format(volume),
+            capacity=size,
+            labels=labels,
+            access_modes=access_modes,
+            storage=dict(
+                glusterfs=dict(
+                    endpoints=endpoints,
+                    path=path,
+                    readOnly=read_only)))
+
+    def build_pv_dict(self, varname=None):
+        """Check for the existence of PV variables"""
+        kind = self.task_vars.get(str(varname) + '_kind')
+        if kind:
+            kind = self._templar.template(kind)
+            create_pv = self.task_vars.get(str(varname) + '_create_pv')
+            if create_pv and self._templar.template(create_pv):
+                persistent_volume = None
+                if kind == 'nfs':
+                    persistent_volume = self.build_pv_nfs(varname=varname)
+
+                elif kind == 'openstack':
+                    persistent_volume = self.build_pv_openstack(varname=varname)
+
+                elif kind == 'glusterfs':
+                    persistent_volume = self.build_pv_glusterfs(varname=varname)
+
+                elif not (kind == 'object' or kind == 'dynamic'):
+                    msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+                        kind,
+                        varname)
+                    raise errors.AnsibleModuleError(msg)
+
+                return persistent_volume
+        return None
+
+    def build_pvc_dict(self, varname=None):
+        """Check for the existence of PVC variables"""
+        kind = self.task_vars.get(str(varname) + '_kind')
+        if kind:
+            kind = self._templar.template(kind)
+            create_pv = self.task_vars.get(str(varname) + '_create_pv')
+            if create_pv:
+                create_pv = self._templar.template(create_pv)
+                create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
+                if create_pvc:
+                    create_pvc = self._templar.template(create_pvc)
+                    if kind != 'object' and create_pv and create_pvc:
+                        volume, size, _, access_modes = self.build_common(varname=varname)
+                        return dict(
+                            name="{0}-claim".format(volume),
+                            capacity=size,
+                            access_modes=access_modes)
+        return None
+
+    def run(self, tmp=None, task_vars=None):
+        """Run generate_pv_pvcs_list action plugin"""
+        result = super(ActionModule, self).run(tmp, task_vars)
+        # Ignore setting self.task_vars outside of init.
+        # pylint: disable=W0201
+        self.task_vars = task_vars or {}
+
+        result["changed"] = False
+        result["failed"] = False
+        result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
+        vars_to_check = ['openshift_hosted_registry_storage',
+                         'openshift_hosted_router_storage',
+                         'openshift_hosted_etcd_storage',
+                         'openshift_logging_storage',
+                         'openshift_loggingops_storage',
+                         'openshift_metrics_storage',
+                         'openshift_prometheus_storage',
+                         'openshift_prometheus_alertmanager_storage',
+                         'openshift_prometheus_alertbuffer_storage']
+        persistent_volumes = []
+        persistent_volume_claims = []
+        for varname in vars_to_check:
+            pv_dict = self.build_pv_dict(varname)
+            if pv_dict:
+                persistent_volumes.append(pv_dict)
+            pvc_dict = self.build_pvc_dict(varname)
+            if pvc_dict:
+                persistent_volume_claims.append(pvc_dict)
+        result["persistent_volumes"] = persistent_volumes
+        result["persistent_volume_claims"] = persistent_volume_claims
+        return result
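The plugin derives every variable it reads by suffixing one of the component prefixes in vars_to_check, e.g. openshift_hosted_registry_storage plus _kind, _create_pv, _volume_name, _volume_size, _access_modes, and per-kind keys such as _nfs_directory. A stand-in illustration of that naming convention (a plain dict in place of task_vars, with invented values):

    task_vars = {
        'openshift_hosted_registry_storage_kind': 'nfs',
        'openshift_hosted_registry_storage_create_pv': True,
        'openshift_hosted_registry_storage_volume_name': 'registry',
        'openshift_hosted_registry_storage_volume_size': '5Gi',
        'openshift_hosted_registry_storage_access_modes': ['ReadWriteMany'],
        'openshift_hosted_registry_storage_nfs_directory': '/exports',
    }

    prefix = 'openshift_hosted_registry_storage'
    kind = task_vars.get(prefix + '_kind')                  # 'nfs'
    # build_pv_nfs composes the NFS export path as directory + '/' + volume:
    path = (task_vars[prefix + '_nfs_directory'] + '/' +
            task_vars[prefix + '_volume_name'])             # '/exports/registry'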
diff --git a/roles/openshift_persistent_volumes/defaults/main.yml b/roles/openshift_persistent_volumes/defaults/main.yml
new file mode 100644
index 000000000..b16e164e6
--- /dev/null
+++ b/roles/openshift_persistent_volumes/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+
+openshift_persistent_volumes_default_nfs_group: 'oo_nfs_to_config'
+
+openshift_persistent_volume_extras: []
+openshift_persistent_volume_claims_extras: []
+
+glusterfs_pv: []
+glusterfs_pvc: []
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 19e9a56b7..48b0699ab 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -9,4 +9,5 @@ galaxy_info:
  - name: EL
    versions:
    - 7
-dependencies: {}
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index e431e978c..0b4dd7d1f 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -9,39 +9,36 @@
    cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
  changed_when: False

-- name: Deploy PersistentVolume definitions
-  template:
-    dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
-    src: persistent-volume.yml.j2
-  when: persistent_volumes | length > 0
-  changed_when: False
+- set_fact:
+    glusterfs_pv:
+    - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-volume"
+      capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+      access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+      storage:
+        glusterfs:
+          endpoints: "{{ openshift_hosted_registry_storage_glusterfs_endpoints }}"
+          path: "{{ openshift_hosted_registry_storage_glusterfs_path }}"
+          readOnly: "{{ openshift_hosted_registry_storage_glusterfs_readOnly }}"
+    glusterfs_pvc:
+    - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
+      capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+      access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+  when: openshift_hosted_registry_storage_glusterfs_swap | default(False)

-- name: Create PersistentVolumes
-  command: >
-    {{ openshift.common.client_binary }} create
-    -f {{ mktemp.stdout }}/persistent-volumes.yml
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-  register: pv_create_output
-  when: persistent_volumes | length > 0
-  failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
-  changed_when: ('created' in pv_create_output.stdout)
+- name: create standard pv and pvc lists
+  # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+  generate_pv_pvcs_list: {}
+  register: l_pv_pvcs_list

-- name: Deploy PersistentVolumeClaim definitions
-  template:
-    dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
-    src: persistent-volume-claim.yml.j2
-  when: persistent_volume_claims | length > 0
-  changed_when: False
+- include_tasks: pv.yml
+  vars:
+    l_extra_persistent_volumes: "{{ openshift_persistent_volume_extras | union(glusterfs_pv) }}"
+    persistent_volumes: "{{ l_pv_pvcs_list.persistent_volumes | union(l_extra_persistent_volumes) }}"

-- name: Create PersistentVolumeClaims
-  command: >
-    {{ openshift.common.client_binary }} create
-    -f {{ mktemp.stdout }}/persistent-volume-claims.yml
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-  register: pvc_create_output
-  when: persistent_volume_claims | length > 0
-  failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
-  changed_when: ('created' in pvc_create_output.stdout)
+- include_tasks: pvc.yml
+  vars:
+    l_extra_persistent_volume_claims: "{{ openshift_persistent_volume_claims_extras | union(glusterfs_pvc) }}"
+    persistent_volume_claims: "{{ l_pv_pvcs_list.persistent_volume_claims | union(l_extra_persistent_volume_claims) }}"

- name: Delete temp directory
  file:
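tasks/main.yml now assembles the final lists by chaining Jinja's union filter: plugin output, then the *_extras lists, then the optional registry swap entries. A rough Python equivalent of that composition (names mirror the role variables; the contents are invented):

    def union(a, b):
        # Roughly Jinja's union filter: items of a plus items of b not already in a.
        return a + [item for item in b if item not in a]

    generated = [{'name': 'registry-volume'}]   # from generate_pv_pvcs_list
    extras = []                                 # openshift_persistent_volume_extras
    glusterfs_pv = []                           # non-empty only when swapping to GlusterFS

    persistent_volumes = union(generated, union(extras, glusterfs_pv))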
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
new file mode 100644
index 000000000..346605ff7
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolume definitions
+  template:
+    dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
+    src: persistent-volume.yml.j2
+  when: persistent_volumes | length > 0
+  changed_when: False
+
+- name: Create PersistentVolumes
+  command: >
+    {{ openshift.common.client_binary }} create
+    -f {{ mktemp.stdout }}/persistent-volumes.yml
+    --config={{ mktemp.stdout }}/admin.kubeconfig
+  register: pv_create_output
+  when: persistent_volumes | length > 0
+  failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+  changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
new file mode 100644
index 000000000..e44f9b18f
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolumeClaim definitions
+  template:
+    dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
+    src: persistent-volume-claim.yml.j2
+  when: persistent_volume_claims | length > 0
+  changed_when: False
+
+- name: Create PersistentVolumeClaims
+  command: >
+    {{ openshift.common.client_binary }} create
+    -f {{ mktemp.stdout }}/persistent-volume-claims.yml
+    --config={{ mktemp.stdout }}/admin.kubeconfig
+  register: pvc_create_output
+  when: persistent_volume_claims | length > 0
+  failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+  changed_when: ('created' in pvc_create_output.stdout)
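Both new task files keep the original idempotent pattern around `oc create`: the command counts as successful when the object was created or already existed, and as changed only on creation. The failed_when/changed_when expressions restated in Python (the sample CLI strings are illustrative):

    def interpret_create(stdout, stderr):
        # changed only when the CLI reports creation; failed only when it
        # neither created the object nor found it already present.
        changed = 'created' in stdout
        failed = ('already exists' not in stderr) and ('created' not in stdout)
        return changed, failed

    assert interpret_create('persistentvolume "registry-volume" created', '') == (True, False)
    assert interpret_create('', 'Error: ... already exists') == (False, False)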
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index ee9dac7cb..9ec14208b 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -17,5 +17,5 @@ items:
    capacity:
      storage: "{{ volume.capacity }}"
    accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }}
-    {{ volume.storage.keys()[0] }}: {{ volume.storage[volume.storage.keys()[0]] | to_padded_yaml(3, 2) }}
+    {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }}
{% endfor %}
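The template fix above is a Python 3 compatibility change: on Python 2, dict.keys() returns an indexable list, while on Python 3 it returns a view that raises TypeError when subscripted, so the Jinja expression needs the explicit | list cast. The same issue in plain Python, with an invented volume dict:

    storage = {'nfs': {'server': 'nfs.example.com', 'path': '/exports/registry'}}

    # Python 2 only:        storage.keys()[0]
    # Python 2 and 3:
    kind = list(storage.keys())[0]    # 'nfs'
    spec = storage[kind]              # {'server': ..., 'path': ...}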
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 814d6ff28..b7b3c0db2 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -45,7 +45,7 @@ openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if
openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"

openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
-openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift_hosted_registry_namespace | default(openshift_storage_glusterfs_namespace) }}"
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index d3cba61cf..fa50e39a2 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -56,5 +56,5 @@
  register: registry_volume

- name: Create GlusterFS registry volume
-  command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
-  when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
+  command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift_hosted_registry_storage_volume_size | replace('Gi','') }} --name={{ openshift_hosted_registry_storage_glusterfs_path }}"
+  when: "openshift_hosted_registry_storage_glusterfs_path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index d2d8c6c10..728f15a21 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -10,8 +10,10 @@
  - groups.glusterfs | default([]) | count > 0

- include: glusterfs_registry.yml
-  when:
-  - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
+  when: >
+    groups.glusterfs_registry | default([]) | count > 0
+    or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
+    or (openshift_hosted_registry_storage_glusterfs_swap | default(False))

- name: Delete temp directory
  file:
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index c25cad74c..55e4024ec 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -20,25 +20,25 @@

- name: Ensure exports directory exists
  file:
-    path: "{{ openshift.hosted.registry.storage.nfs.directory }}"
+    path: "{{ openshift_hosted_registry_storage_nfs_directory }}"
    state: directory

- name: Ensure export directories exist
  file:
-    path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}"
+    path: "{{ item }}"
    state: directory
    mode: 0777
    owner: nfsnobody
    group: nfsnobody
  with_items:
-  - "{{ openshift.hosted.registry }}"
-  - "{{ openshift.metrics }}"
-  - "{{ openshift.logging }}"
-  - "{{ openshift.loggingops }}"
-  - "{{ openshift.hosted.etcd }}"
-  - "{{ openshift.prometheus }}"
-  - "{{ openshift.prometheus.alertmanager }}"
-  - "{{ openshift.prometheus.alertbuffer }}"
+  - "{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}"
+  - "{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }}"
+  - "{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }}"
+  - "{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }}"
+  - "{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }}"
+  - "{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }}"
+  - "{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }}"
+  - "{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}"

- name: Configure exports
  template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index c2a741035..2ec8db019 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,8 +1,8 @@
-{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
-{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
-{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
-{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
-{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
-{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
-{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
+{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }} {{ openshift_hosted_registry_storage_nfs_options }}
+{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }} {{ openshift_metrics_storage_nfs_options }}
+{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }} {{ openshift_logging_storage_nfs_options }}
+{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }} {{ openshift_loggingops_storage_nfs_options }}
+{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }} {{ openshift_hosted_etcd_storage_nfs_options }}
+{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }} {{ openshift_prometheus_storage_nfs_options }}
+{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }} {{ openshift_prometheus_alertmanager_storage_nfs_options }}
+{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }} {{ openshift_prometheus_alertbuffer_storage_nfs_options }}
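exports.j2 still renders one NFS export per component, now from the flat variables: each line is "<directory>/<volume_name> <options>". A sketch of the rendered output for two components (paths and options are placeholder values, not the role's defaults):

    components = [
        ('/exports', 'registry', '*(rw,root_squash)'),
        ('/exports', 'metrics', '*(rw,root_squash)'),
    ]
    for directory, volume, options in components:
        print('{0}/{1} {2}'.format(directory, volume, options))
    # /exports/registry *(rw,root_squash)
    # /exports/metrics *(rw,root_squash)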
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 38b398343..5d7683120 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -12,7 +12,4 @@ galaxy_info:
  categories:
  - cloud
dependencies:
-- role: openshift_docker_facts
-- role: docker
-  when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
- role: lib_utils
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index 574e89899..71f957b78 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,7 +1,4 @@
---
-- set_fact:
-    l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
-
- name: Set containerized version to configure if openshift_image_tag specified
  set_fact:
    # Expects a leading "v" in inventory, strip it off here unless
@@ -24,7 +21,7 @@
  register: cli_image_version
  when:
  - openshift_version is not defined
-  - not l_use_crio_only
+  - not openshift_use_crio_only

# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
- set_fact:
@@ -33,7 +30,7 @@
  - openshift_version is not defined
  - openshift.common.deployment_type == 'origin'
  - cli_image_version.stdout_lines[0].split('-') | length > 1
-  - not l_use_crio_only
+  - not openshift_use_crio_only

- set_fact:
    openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
@@ -48,14 +45,14 @@
  when:
  - openshift_version is defined
  - openshift_version.split('.') | length == 2
-  - not l_use_crio_only
+  - not openshift_use_crio_only

- set_fact:
    openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
  when:
  - openshift_version is defined
  - openshift_version.split('.') | length == 2
-  - not l_use_crio_only
+  - not openshift_use_crio_only

# TODO: figure out a way to check for the openshift_version when using CRI-O.
# We should do that using the images in the ostree storage so we don't have
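The version detection above slices the first line of the containerized client's version output: the split chain drops the leading word, strips the "v" prefix, and discards any pre-release suffix. Applied in Python to a hypothetical line built from the version format the file's own comment gives (the leading word is assumed for illustration):

    line = 'openshift v1.3.0-alpha.1-321-gb095e3a'   # hypothetical stdout_lines[0]
    token = line.split(' ')[1]                        # 'v1.3.0-alpha.1-321-gb095e3a'
    version = token.split('-')[0][1:]                 # '1.3.0'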
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 1e2af2c61..dda8eb4c6 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -125,7 +125,6 @@ def write_inventory_vars(base_inventory, lb):
        base_inventory.write('openshift_override_hostname_check=true\n')

    if lb is not None:
-        base_inventory.write('openshift_master_cluster_method=native\n')
        base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
        base_inventory.write(
            "openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
@@ -266,7 +265,6 @@ def default_facts(hosts, verbose=False):
    facts_env = os.environ.copy()
    facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
    facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
-    facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
    if 'ansible_log_path' in CFG.settings:
        facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
    if 'ansible_config' in CFG.settings: