244 files changed, 1348 insertions, 2852 deletions
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index f9564499d..45795d097 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -710,229 +710,6 @@ def oo_openshift_env(hostvars): return facts -# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements, too-many-locals -def oo_component_persistent_volumes(hostvars, groups, component, subcomponent=None): - """ Generate list of persistent volumes based on oo_openshift_env - storage options set in host variables for a specific component. - """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - if not issubclass(type(groups), dict): - raise errors.AnsibleFilterError("|failed expects groups is a dict") - - persistent_volume = None - - if component in hostvars['openshift']: - if subcomponent is not None: - storage_component = hostvars['openshift'][component][subcomponent] - else: - storage_component = hostvars['openshift'][component] - - if 'storage' in storage_component: - params = storage_component['storage'] - kind = params['kind'] - if 'create_pv' in params: - create_pv = params['create_pv'] - if kind is not None and create_pv: - if kind == 'nfs': - host = params['host'] - if host is None: - if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: - host = groups['oo_nfs_to_config'][0] - else: - raise errors.AnsibleFilterError("|failed no storage host detected") - directory = params['nfs']['directory'] - volume = params['volume']['name'] - path = directory + '/' + volume - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - nfs=dict( - server=host, - path=path))) - - elif kind == 'openstack': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - filesystem = params['openstack']['filesystem'] - volume_id = params['openstack']['volumeID'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - cinder=dict( - fsType=filesystem, - volumeID=volume_id))) - - elif kind == 'glusterfs': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - endpoints = params['glusterfs']['endpoints'] - path = params['glusterfs']['path'] - read_only = params['glusterfs']['readOnly'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - glusterfs=dict( - endpoints=endpoints, - path=path, - readOnly=read_only))) - - elif not (kind == 'object' or kind == 'dynamic'): - msg = "|failed invalid storage kind '{0}' for component '{1}'".format( - kind, - component) - raise errors.AnsibleFilterError(msg) - return persistent_volume - - -# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements -def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): - """ Generate list of persistent volumes based on oo_openshift_env - storage options set in host variables. 
- """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - if not issubclass(type(groups), dict): - raise errors.AnsibleFilterError("|failed expects groups is a dict") - if persistent_volumes is not None and not issubclass(type(persistent_volumes), list): - raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list") - - if persistent_volumes is None: - persistent_volumes = [] - if 'hosted' in hostvars['openshift']: - for component in hostvars['openshift']['hosted']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'hosted', component) - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - - if 'logging' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'loggingops' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'metrics' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'prometheus' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'alertmanager' in hostvars['openshift']['prometheus']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertmanager') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'alertbuffer' in hostvars['openshift']['prometheus']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertbuffer') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - return persistent_volumes - - -def oo_component_pv_claims(hostvars, component, subcomponent=None): - """ Generate list of persistent volume claims based on oo_openshift_env - storage options set in host variables for a speicific component. - """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - - if component in hostvars['openshift']: - if subcomponent is not None: - storage_component = hostvars['openshift'][component][subcomponent] - else: - storage_component = hostvars['openshift'][component] - - if 'storage' in storage_component: - params = storage_component['storage'] - kind = params['kind'] - if 'create_pv' in params: - if 'create_pvc' in params: - create_pv = params['create_pv'] - create_pvc = params['create_pvc'] - if kind not in [None, 'object'] and create_pv and create_pvc: - volume = params['volume']['name'] - size = params['volume']['size'] - access_modes = params['access']['modes'] - persistent_volume_claim = dict( - name="{0}-claim".format(volume), - capacity=size, - access_modes=access_modes) - return persistent_volume_claim - return None - - -def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): - """ Generate list of persistent volume claims based on oo_openshift_env - storage options set in host variables. 
- """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list): - raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list") - - if persistent_volume_claims is None: - persistent_volume_claims = [] - if 'hosted' in hostvars['openshift']: - for component in hostvars['openshift']['hosted']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'hosted', component) - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - - if 'logging' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'loggingops' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'metrics' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'prometheus' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'alertmanager' in hostvars['openshift']['prometheus']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertmanager') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'alertbuffer' in hostvars['openshift']['prometheus']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertbuffer') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - return persistent_volume_claims - - def oo_31_rpm_rename_conversion(rpms, openshift_version=None): """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms names with proper version (if provided) @@ -1220,8 +997,6 @@ class FilterModule(object): "oo_generate_secret": oo_generate_secret, "oo_nodes_with_label": oo_nodes_with_label, "oo_openshift_env": oo_openshift_env, - "oo_persistent_volumes": oo_persistent_volumes, - "oo_persistent_volume_claims": oo_persistent_volume_claims, "oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion, "oo_pods_match_component": oo_pods_match_component, "oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars, diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 3a9944ba4..e3b56d7a1 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -298,24 +298,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Set cockpit plugins #osm_cockpit_plugins=['cockpit-kubernetes'] -# Native high availability cluster method with optional load balancer. +# Native high availability (default cluster method) # If no lb group is defined, the installer assumes that a load balancer has # been preconfigured. For installation the value of # openshift_master_cluster_hostname must resolve to the load balancer # or to one or all of the masters defined in the inventory if no load # balancer is present. 
-#openshift_master_cluster_method=native -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com - -# Pacemaker high availability cluster method. -# Pacemaker HA environment must be able to self provision the -# configured VIP. For installation openshift_master_cluster_hostname -# must resolve to the configured VIP. -#openshift_master_cluster_method=pacemaker -#openshift_master_cluster_password=openshift_cluster -#openshift_master_cluster_vip=192.168.133.25 -#openshift_master_cluster_public_vip=192.168.133.25 #openshift_master_cluster_hostname=openshift-ansible.test.example.com #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml index 44a2ef534..69b2541bb 100644 --- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml +++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml @@ -8,7 +8,7 @@ hosts: masters:!masters[0] pre_tasks: - set_fact: - openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" + openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain }}" tasks: - include_role: name: openshift_logging diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 0c2a2c7e8..ed7a7bd1a 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -62,7 +62,6 @@ - origin-master - origin-master-api - origin-master-controllers - - pcsd failed_when: false - hosts: etcd @@ -384,8 +383,6 @@ - origin-excluder - origin-docker-excluder - origin-master - - pacemaker - - pcs register: result until: result | success @@ -456,8 +453,6 @@ - /etc/sysconfig/origin-master-api - /etc/sysconfig/origin-master-controllers - /usr/share/openshift/examples - - /var/lib/pacemaker - - /var/lib/pcsd - /usr/lib/systemd/system/atomic-openshift-master-api.service - /usr/lib/systemd/system/atomic-openshift-master-controllers.service - /usr/lib/systemd/system/origin-master-api.service diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml index da7ec9d21..1dabae357 100644 --- a/playbooks/aws/openshift-cluster/hosted.yml +++ b/playbooks/aws/openshift-cluster/hosted.yml @@ -4,7 +4,7 @@ - include: ../../openshift-metrics/private/config.yml when: openshift_metrics_install_metrics | default(false) | bool -- include: ../../common/openshift-cluster/openshift_logging.yml +- include: ../../openshift-logging/private/config.yml when: openshift_logging_install_logging | default(false) | bool - include: ../../openshift-prometheus/private/config.yml diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml index 7d03914a2..4b74e5bce 100644 --- a/playbooks/byo/config.yml +++ b/playbooks/byo/config.yml @@ -1,2 +1,3 @@ --- -- include: openshift-cluster/config.yml +# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated. 
+- import_playbook: ../deploy_cluster.yml diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml deleted file mode 100644 index 57823847b..000000000 --- a/playbooks/byo/openshift-cluster/config.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../init/main.yml - -- include: ../../common/openshift-cluster/config.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml deleted file mode 100644 index a9be8dec4..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: ../../../init/evaluate_groups.yml - -- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml deleted file mode 100644 index ca09fb65c..000000000 --- a/playbooks/byo/openshift-node/network_manager.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated. -- include: ../../openshift-node/network_manager.yml diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml deleted file mode 100644 index 2eeb81b86..000000000 --- a/playbooks/common/openshift-cluster/config.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -- include: ../../openshift-checks/private/install.yml - -- include: ../../openshift-etcd/private/config.yml - -- include: ../../openshift-nfs/private/config.yml - when: groups.oo_nfs_to_config | default([]) | count > 0 - -- include: ../../openshift-loadbalancer/private/config.yml - when: groups.oo_lb_to_config | default([]) | count > 0 - -- include: ../../openshift-master/private/config.yml - -- include: ../../openshift-master/private/additional_config.yml - -- include: ../../openshift-node/private/config.yml - -- include: ../../openshift-glusterfs/private/config.yml - when: groups.oo_glusterfs_to_config | default([]) | count > 0 - -- include: ../../openshift-hosted/private/config.yml - -- include: ../../openshift-metrics/private/config.yml - when: openshift_metrics_install_metrics | default(false) | bool - -- include: openshift_logging.yml - when: openshift_logging_install_logging | default(false) | bool - -- include: ../../openshift-prometheus/private/config.yml - when: openshift_hosted_prometheus_deploy | default(false) | bool - -- include: ../../openshift-service-catalog/private/config.yml - when: openshift_enable_service_catalog | default(true) | bool - -- include: ../../openshift-management/private/config.yml - when: openshift_management_install_management | default(false) | bool - -- name: Print deprecated variable warning message if necessary - hosts: oo_first_master - gather_facts: no - tasks: - - debug: msg="{{__deprecation_message}}" - when: - - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml index 83f16ac0d..3b779becb 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml @@ -6,10 +6,6 @@ retries: 3 delay: 30 -- name: Update docker facts - openshift_facts: - role: docker - - name: Restart containerized services service: name={{ item }} state=started with_items: diff --git 
a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins deleted file mode 120000 index 27ddaa18b..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 446f315d6..84b740227 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -6,7 +6,7 @@ - name: Update oreg_auth docker login credentials if necessary include_role: - name: docker + name: container_runtime tasks_from: registry_auth.yml when: oreg_auth_user is defined diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index d7a52707c..0d3fed212 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -16,8 +16,8 @@ local_facts: embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" -- name: Upgrade and backup etcd - include: ./etcd/main.yml +- name: Backup and upgrade etcd + include: ../../../openshift-etcd/private/upgrade_main.yml # Create service signer cert when missing. Service signer certificate # is added to master config in the master_config_upgrade hook. @@ -143,10 +143,6 @@ roles: - { role: openshift_cli } vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. - skip_docker_role: True __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml" tasks: - name: Reconcile Cluster Roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 6cb6a665f..5f9c56867 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -73,12 +73,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 8f48bedcc..1aac3d014 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -77,12 +77,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index f25cfe0d0..306b76422 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index 2b99568c7..6d4949542 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -77,12 +77,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index d3d2046e6..0a592896b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -81,12 +81,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index c0546bd2d..b381d606a 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index b602cdd0e..e7d7756d1 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -77,12 +77,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index da81e6dea..be362e3ff 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -81,12 +81,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index abd56e762..6e68116b0 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/container-runtime/config.yml b/playbooks/container-runtime/config.yml new file mode 100644 index 000000000..f15aa771f --- /dev/null +++ b/playbooks/container-runtime/config.yml @@ -0,0 +1,6 @@ +--- +- import_playbook: ../init/main.yml + vars: + skip_verison: True + +- import_playbook: private/config.yml diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml new file mode 100644 index 000000000..67445edeb --- /dev/null +++ b/playbooks/container-runtime/private/config.yml @@ -0,0 +1,28 @@ +--- +- hosts: "{{ l_containerized_host_groups }}" + vars: + l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}" + l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}" + # role: container_runtime is necessary here to bring role default variables + # into the play scope. 
+ roles: + - role: container_runtime + tasks: + - include_role: + name: container_runtime + tasks_from: package_docker.yml + when: + - not openshift_docker_use_system_container | bool + - not openshift_use_crio_only | bool + - include_role: + name: container_runtime + tasks_from: systemcontainer_docker.yml + when: + - openshift_docker_use_system_container | bool + - not openshift_use_crio_only | bool + - include_role: + name: container_runtime + tasks_from: systemcontainer_crio.yml + when: + - openshift_use_crio | bool + - openshift_docker_is_node_or_master | bool diff --git a/playbooks/container-runtime/private/roles b/playbooks/container-runtime/private/roles new file mode 120000 index 000000000..148b13206 --- /dev/null +++ b/playbooks/container-runtime/private/roles @@ -0,0 +1 @@ +../../roles/
\ No newline at end of file diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml new file mode 100644 index 000000000..0e6bde09a --- /dev/null +++ b/playbooks/deploy_cluster.yml @@ -0,0 +1,46 @@ +--- +- import_playbook: init/main.yml + +- import_playbook: openshift-checks/private/install.yml + +- import_playbook: openshift-etcd/private/config.yml + +- import_playbook: openshift-nfs/private/config.yml + when: groups.oo_nfs_to_config | default([]) | count > 0 + +- import_playbook: openshift-loadbalancer/private/config.yml + when: groups.oo_lb_to_config | default([]) | count > 0 + +- import_playbook: openshift-master/private/config.yml + +- import_playbook: openshift-master/private/additional_config.yml + +- import_playbook: openshift-node/private/config.yml + +- import_playbook: openshift-glusterfs/private/config.yml + when: groups.oo_glusterfs_to_config | default([]) | count > 0 + +- import_playbook: openshift-hosted/private/config.yml + +- import_playbook: openshift-metrics/private/config.yml + when: openshift_metrics_install_metrics | default(false) | bool + +- import_playbook: openshift-logging/private/config.yml + when: openshift_logging_install_logging | default(false) | bool + +- import_playbook: openshift-prometheus/private/config.yml + when: openshift_hosted_prometheus_deploy | default(false) | bool + +- import_playbook: openshift-service-catalog/private/config.yml + when: openshift_enable_service_catalog | default(true) | bool + +- import_playbook: openshift-management/private/config.yml + when: openshift_management_install_management | default(false) | bool + +- name: Print deprecated variable warning message if necessary + hosts: oo_first_master + gather_facts: no + tasks: + - debug: msg="{{__deprecation_message}}" + when: + - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/gcp/openshift-cluster/provision.yml b/playbooks/gcp/provision.yml index 097717607..9887f09f2 100644 --- a/playbooks/gcp/openshift-cluster/provision.yml +++ b/playbooks/gcp/provision.yml @@ -9,8 +9,5 @@ include_role: name: openshift_gcp -- name: run the init - include: ../../init/main.yml - -- name: run the config - include: ../../common/openshift-cluster/config.yml +- name: run the cluster deploy + include: ../deploy_cluster.yml diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml index 1166ac538..d41f365dc 100644 --- a/playbooks/init/facts.yml +++ b/playbooks/init/facts.yml @@ -135,11 +135,13 @@ - openshift_http_proxy is defined or openshift_https_proxy is defined - openshift_generate_no_proxy_hosts | default(True) | bool + - name: Initialize openshift.node.sdn_mtu + openshift_facts: + role: node + local_facts: + sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" + - name: initialize_facts set_fact repoquery command set_fact: repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}" - - - name: initialize_facts set_fact on openshift_docker_hosted_registry_network - set_fact: - openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml index 1d4f41ffc..b2b972a7d 100644 --- a/playbooks/init/main.yml +++ b/playbooks/init/main.yml @@ -18,12 +18,15 @@ - import_playbook: 
facts.yml - import_playbook: sanity_checks.yml + when: not (skip_sanity_checks | default(False)) - import_playbook: validate_hostnames.yml + when: not (skip_validate_hostnames | default(False)) - import_playbook: repos.yml - import_playbook: version.yml + when: not (skip_verison | default(False)) - name: Initialization Checkpoint End hosts: all diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml index c9f186e72..f3bb3c2d1 100644 --- a/playbooks/openshift-etcd/private/ca.yml +++ b/playbooks/openshift-etcd/private/ca.yml @@ -7,7 +7,7 @@ tasks: - include_role: name: etcd - tasks_from: ca + tasks_from: ca.yml vars: etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" diff --git a/playbooks/openshift-etcd/private/certificates-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml index d738c8207..ce21a1f96 100644 --- a/playbooks/openshift-etcd/private/certificates-backup.yml +++ b/playbooks/openshift-etcd/private/certificates-backup.yml @@ -5,10 +5,10 @@ tasks: - include_role: name: etcd - tasks_from: backup_generated_certificates + tasks_from: backup_generated_certificates.yml - include_role: name: etcd - tasks_from: remove_generated_certificates + tasks_from: remove_generated_certificates.yml - name: Backup deployed etcd certificates hosts: oo_etcd_to_config @@ -16,4 +16,4 @@ tasks: - include_role: name: etcd - tasks_from: backup_server_certificates + tasks_from: backup_server_certificates.yml diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml index 514319b88..c7a532622 100644 --- a/playbooks/openshift-etcd/private/embedded2external.yml +++ b/playbooks/openshift-etcd/private/embedded2external.yml @@ -20,7 +20,7 @@ - name: Check the master API is ready include_role: name: openshift_master - tasks_from: check_master_api_is_ready + tasks_from: check_master_api_is_ready.yml - set_fact: master_service: "{{ openshift.common.service_type + '-master' }}" embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" @@ -34,7 +34,7 @@ # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285 - include_role: name: etcd - tasks_from: backup + tasks_from: backup.yml vars: r_etcd_common_backup_tag: pre-migrate r_etcd_common_embedded_etcd: "{{ true }}" @@ -42,7 +42,7 @@ - include_role: name: etcd - tasks_from: backup.archive + tasks_from: backup.archive.yml vars: r_etcd_common_backup_tag: pre-migrate r_etcd_common_embedded_etcd: "{{ true }}" @@ -58,7 +58,7 @@ tasks: - include_role: name: etcd - tasks_from: backup_master_etcd_certificates + tasks_from: backup_master_etcd_certificates.yml - name: Redeploy master etcd certificates import_playbook: master_etcd_certificates.yml @@ -75,10 +75,10 @@ pre_tasks: - include_role: name: etcd - tasks_from: disable_etcd + tasks_from: disable_etcd.yml - include_role: name: etcd - tasks_from: clean_data + tasks_from: clean_data.yml # 6. 
copy the embedded etcd backup to the external host # TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory @@ -93,7 +93,7 @@ - include_role: name: etcd - tasks_from: backup.fetch + tasks_from: backup.fetch.yml vars: etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" r_etcd_common_backup_tag: pre-migrate @@ -103,7 +103,7 @@ - include_role: name: etcd - tasks_from: backup.copy + tasks_from: backup.copy.yml vars: etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" r_etcd_common_backup_tag: pre-migrate @@ -124,14 +124,14 @@ tasks: - include_role: name: etcd - tasks_from: backup.unarchive + tasks_from: backup.unarchive.yml vars: r_etcd_common_backup_tag: pre-migrate r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" - include_role: name: etcd - tasks_from: backup.force_new_cluster + tasks_from: backup.force_new_cluster.yml vars: r_etcd_common_backup_tag: pre-migrate r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" @@ -145,7 +145,7 @@ tasks: - include_role: name: openshift_master - tasks_from: configure_external_etcd + tasks_from: configure_external_etcd.yml vars: etcd_peer_url_scheme: "https" etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}" diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml index 4269918c2..834bd242d 100644 --- a/playbooks/openshift-etcd/private/migrate.yml +++ b/playbooks/openshift-etcd/private/migrate.yml @@ -17,7 +17,7 @@ tasks: - include_role: name: etcd - tasks_from: migrate.pre_check + tasks_from: migrate.pre_check.yml vars: r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" etcd_peer: "{{ ansible_default_ipv4.address }}" @@ -46,7 +46,7 @@ post_tasks: - include_role: name: etcd - tasks_from: backup + tasks_from: backup.yml vars: r_etcd_common_backup_tag: pre-migration r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" @@ -74,7 +74,7 @@ pre_tasks: - include_role: name: etcd - tasks_from: disable_etcd + tasks_from: disable_etcd.yml - name: Migrate data on first etcd hosts: oo_etcd_to_migrate[0] @@ -82,7 +82,7 @@ tasks: - include_role: name: etcd - tasks_from: migrate + tasks_from: migrate.yml vars: r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" etcd_peer: "{{ openshift.common.ip }}" @@ -95,7 +95,7 @@ tasks: - include_role: name: etcd - tasks_from: clean_data + tasks_from: clean_data.yml vars: r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" etcd_peer: "{{ openshift.common.ip }}" @@ -132,7 +132,7 @@ tasks: - include_role: name: etcd - tasks_from: migrate.add_ttls + tasks_from: migrate.add_ttls.yml vars: etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}" etcd_url_scheme: "https" @@ -144,7 +144,7 @@ tasks: - include_role: name: etcd - tasks_from: migrate.configure_master + tasks_from: migrate.configure_master.yml when: etcd_migration_failed | length == 0 - debug: msg: "Skipping master re-configuration since migration failed." 
diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml index cc5d57031..158bcb849 100644 --- a/playbooks/openshift-etcd/private/redeploy-ca.yml +++ b/playbooks/openshift-etcd/private/redeploy-ca.yml @@ -16,12 +16,12 @@ tasks: - include_role: name: etcd - tasks_from: backup_ca_certificates + tasks_from: backup_ca_certificates.yml - include_role: name: etcd - tasks_from: remove_ca_certificates + tasks_from: remove_ca_certificates.yml -- include: ca.yml +- import_playbook: ca.yml - name: Create temp directory for syncing certs hosts: localhost @@ -44,7 +44,7 @@ etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" -- include: restart.yml +- import_playbook: restart.yml # Do not restart etcd when etcd certificates were previously expired. when: ('expired' not in (hostvars | oo_select_keys(groups['etcd']) @@ -56,7 +56,7 @@ tasks: - include_role: name: etcd - tasks_from: retrieve_ca_certificates + tasks_from: retrieve_ca_certificates.yml vars: etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}" @@ -82,7 +82,7 @@ state: absent changed_when: false -- include: ../../openshift-master/private/restart.yml +- import_playbook: ../../openshift-master/private/restart.yml # Do not restart masters when master or etcd certificates were previously expired. when: # masters diff --git a/playbooks/openshift-etcd/private/redeploy-certificates.yml b/playbooks/openshift-etcd/private/redeploy-certificates.yml index cc1e6adf5..1c8eb27ac 100644 --- a/playbooks/openshift-etcd/private/redeploy-certificates.yml +++ b/playbooks/openshift-etcd/private/redeploy-certificates.yml @@ -11,8 +11,8 @@ # certificates were previously expired. - role: openshift_certificate_expiry -- include: certificates-backup.yml +- import_playbook: certificates-backup.yml -- include: certificates.yml +- import_playbook: certificates.yml vars: etcd_certificates_redeploy: true diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml index fac8e3f02..3ef043ec8 100644 --- a/playbooks/openshift-etcd/private/scaleup.yml +++ b/playbooks/openshift-etcd/private/scaleup.yml @@ -32,7 +32,7 @@ until: etcd_add_check.rc == 0 - include_role: name: etcd - tasks_from: server_certificates + tasks_from: server_certificates.yml vars: etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}" etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}" @@ -78,4 +78,4 @@ post_tasks: - include_role: name: openshift_master - tasks_from: update_etcd_client_urls + tasks_from: update_etcd_client_urls.yml diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml index 14c74baf3..695b53990 100644 --- a/playbooks/openshift-etcd/private/server_certificates.yml +++ b/playbooks/openshift-etcd/private/server_certificates.yml @@ -7,7 +7,7 @@ post_tasks: - include_role: name: etcd - tasks_from: server_certificates + tasks_from: server_certificates.yml vars: etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml index 531175c85..22ed7e610 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/openshift-etcd/private/upgrade_backup.yml @@ -6,7 +6,7 @@ post_tasks: - 
include_role: name: etcd - tasks_from: backup + tasks_from: backup.yml vars: r_etcd_common_backup_tag: "{{ etcd_backup_tag }}" r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml index 6fca42bd0..c133c0201 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml +++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml @@ -8,7 +8,7 @@ tasks: - include_role: name: etcd - tasks_from: upgrade_image + tasks_from: upgrade_image.yml vars: r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" etcd_peer: "{{ openshift.common.hostname }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml index 5b8ba3bb2..e373a4a4c 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/openshift-etcd/private/upgrade_main.yml @@ -6,7 +6,7 @@ # available in the repos. So for Fedora we'll simply skip this, sorry. - name: Backup etcd before upgrading anything - include: backup.yml + import_playbook: upgrade_backup.yml vars: etcd_backup_tag: "pre-upgrade-" when: openshift_etcd_backup | default(true) | bool @@ -16,14 +16,14 @@ tasks: - include_role: name: etcd - tasks_from: drop_etcdctl + tasks_from: drop_etcdctl.yml - name: Perform etcd upgrade - include: ./upgrade.yml + import_playbook: upgrade_step.yml when: openshift_etcd_upgrade | default(true) | bool - name: Backup etcd - include: backup.yml + import_playbook: upgrade_backup.yml vars: etcd_backup_tag: "post-3.0-" when: openshift_etcd_backup | default(true) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml index 51e8786b3..902c39d9c 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml +++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml @@ -8,7 +8,7 @@ tasks: - include_role: name: etcd - tasks_from: upgrade_rpm + tasks_from: upgrade_rpm.yml vars: r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" etcd_peer: "{{ openshift.common.hostname }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/openshift-etcd/private/upgrade_step.yml index c5ff4133c..60127fc68 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/openshift-etcd/private/upgrade_step.yml @@ -6,47 +6,47 @@ name: etcd tasks_from: version_detect.yml -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '2.1' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '2.2' -- include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '2.2.5' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '2.3' -- include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '2.3.7' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '3.0' -- include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '3.0.15' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '3.1' -- 
include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '3.1.3' -- include: upgrade_rpm_members.yml +- import_playbook: upgrade_rpm_members.yml vars: etcd_upgrade_version: '3.2' -- include: upgrade_image_members.yml +- import_playbook: upgrade_image_members.yml vars: etcd_upgrade_version: '3.2.7' @@ -56,7 +56,7 @@ tasks: - include_role: name: etcd - tasks_from: upgrade_image + tasks_from: upgrade_image.yml vars: etcd_peer: "{{ openshift.common.hostname }}" when: diff --git a/playbooks/openshift-etcd/redeploy-ca.yml b/playbooks/openshift-etcd/redeploy-ca.yml index b1d23675d..769d694ba 100644 --- a/playbooks/openshift-etcd/redeploy-ca.yml +++ b/playbooks/openshift-etcd/redeploy-ca.yml @@ -1,4 +1,4 @@ --- -- include: ../init/main.yml +- import_playbook: ../init/main.yml -- include: private/redeploy-ca.yml +- import_playbook: private/redeploy-ca.yml diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml index 1bd302c03..753878d70 100644 --- a/playbooks/openshift-etcd/redeploy-certificates.yml +++ b/playbooks/openshift-etcd/redeploy-certificates.yml @@ -1,10 +1,10 @@ --- -- include: ../init/main.yml +- import_playbook: ../init/main.yml -- include: private/redeploy-certificates.yml +- import_playbook: private/redeploy-certificates.yml -- include: private/restart.yml +- import_playbook: private/restart.yml vars: g_etcd_certificates_expired: "{{ ('expired' in (hostvars | oo_select_keys(groups['etcd']) | oo_collect('check_results.check_results.etcd') | oo_collect('health'))) | bool }}" -- include: ../openshift-master/private/restart.yml +- import_playbook: ../openshift-master/private/restart.yml diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml new file mode 100644 index 000000000..ccc797527 --- /dev/null +++ b/playbooks/openshift-etcd/upgrade.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/evaluate_groups.yml + +- import_playbook: private/upgrade_main.yml diff --git a/playbooks/openshift-glusterfs/private/registry.yml b/playbooks/openshift-glusterfs/private/registry.yml index 75c1f0300..917b729f9 100644 --- a/playbooks/openshift-glusterfs/private/registry.yml +++ b/playbooks/openshift-glusterfs/private/registry.yml @@ -1,40 +1,11 @@ --- - import_playbook: config.yml -- name: Initialize GlusterFS registry PV and PVC vars - hosts: oo_first_master - tags: hosted - tasks: - - set_fact: - glusterfs_pv: [] - glusterfs_pvc: [] - - - set_fact: - glusterfs_pv: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - storage: - glusterfs: - endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" - path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" - readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" - glusterfs_pvc: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - when: openshift.hosted.registry.storage.glusterfs.swap - - name: Create persistent volumes hosts: oo_first_master - tags: - - hosted - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] 
| oo_persistent_volume_claims(glusterfs_pvc) }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + when: openshift_hosted_registry_storage_glusterfs_swap | default(False) - name: Create Hosted Resources hosts: oo_first_master diff --git a/playbooks/openshift-hosted/private/cockpit-ui.yml b/playbooks/openshift-hosted/private/cockpit-ui.yml index 359132dd0..d6529425b 100644 --- a/playbooks/openshift-hosted/private/cockpit-ui.yml +++ b/playbooks/openshift-hosted/private/cockpit-ui.yml @@ -5,4 +5,4 @@ - role: cockpit-ui when: - openshift_hosted_manage_registry | default(true) | bool - - not openshift.docker.hosted_registry_insecure | default(false) | bool + - not (openshift_docker_hosted_registry_insecure | default(false)) | bool diff --git a/playbooks/openshift-hosted/private/create_persistent_volumes.yml b/playbooks/openshift-hosted/private/create_persistent_volumes.yml index 8a60a30b8..41ae2eb69 100644 --- a/playbooks/openshift-hosted/private/create_persistent_volumes.yml +++ b/playbooks/openshift-hosted/private/create_persistent_volumes.yml @@ -1,9 +1,5 @@ --- - name: Create Hosted Resources - persistent volumes hosts: oo_first_master - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml index d737b836b..78fe663db 100644 --- a/playbooks/openshift-loadbalancer/private/config.yml +++ b/playbooks/openshift-loadbalancer/private/config.yml @@ -11,14 +11,12 @@ status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" -- name: Configure firewall and docker for load balancers +- name: Configure firewall load balancers hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config vars: openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" roles: - role: os_firewall - - role: openshift_docker - when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool - name: Configure load balancers hosts: oo_lb_to_config diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/openshift-logging/config.yml index 74e186f33..8837a2d32 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/openshift-logging/config.yml @@ -4,6 +4,6 @@ # Hosted logging on. See inventory/byo/hosts.*.example for the # currently supported method. # -- include: ../../init/main.yml +- include: ../init/main.yml -- include: ../../common/openshift-cluster/openshift_logging.yml +- include: private/config.yml diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/openshift-logging/private/config.yml index bc59bd95a..bc59bd95a 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/openshift-logging/private/config.yml diff --git a/playbooks/openshift-logging/private/filter_plugins b/playbooks/openshift-logging/private/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/openshift-logging/private/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-logging/private/library b/playbooks/openshift-logging/private/library
new file mode 120000
index 000000000..ba40d2f56
--- /dev/null
+++ b/playbooks/openshift-logging/private/library
@@ -0,0 +1 @@
+../../../library
\ No newline at end of file
diff --git a/playbooks/openshift-logging/private/lookup_plugins b/playbooks/openshift-logging/private/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/openshift-logging/private/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/openshift-logging/private/roles b/playbooks/openshift-logging/private/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/openshift-logging/private/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml index b7cfbe4e4..a90cd6b22 100644 --- a/playbooks/openshift-master/private/additional_config.yml +++ b/playbooks/openshift-master/private/additional_config.yml @@ -19,8 +19,6 @@ openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" roles: - - role: openshift_master_cluster - when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - role: openshift_project_request_template when: openshift_project_request_template_manage - role: openshift_examples diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml index 97acc5d5d..ecf8f15d9 100644 --- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml +++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml @@ -183,7 +183,6 @@ systemd: name={{ openshift.common.service_type }}-master-api state=restarted when: - yedit_output.changed - - openshift.master.cluster_method == 'native' # We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers @@ -194,7 +193,6 @@ until: result.rc == 0 when: - yedit_output.changed - - openshift.master.cluster_method == 'native' - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml index 5dbb21502..1077d0b9c 100644 --- a/playbooks/openshift-master/private/validate_restart.yml +++ b/playbooks/openshift-master/private/validate_restart.yml @@ -14,9 +14,6 @@ - role: common local_facts: rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" - - role: master - local_facts: - cluster_method: "{{ openshift_master_cluster_method | default(None) }}" # Creating a temp file on localhost, we then check each system that will # be rebooted to see if that file exists, if so we know we're running diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml index 06f3df9fa..dc5d7a57e 100644 --- a/playbooks/openshift-node/private/configure_nodes.yml +++ b/playbooks/openshift-node/private/configure_nodes.yml @@ -4,7 +4,6 @@ vars: openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config'] | default([]))) diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml index 3c3ac3646..5afa83be7 100644 --- a/playbooks/openshift-node/private/containerized_nodes.yml +++ b/playbooks/openshift-node/private/containerized_nodes.yml @@ -5,7 +5,6 @@ vars: openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ 
hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config'] | default([]))) diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml index b7ac27bda..6b517197d 100644 --- a/playbooks/openshift-node/private/image_prep.yml +++ b/playbooks/openshift-node/private/image_prep.yml @@ -1,12 +1,10 @@ --- - name: normalize groups - import_playbook: ../../init/evaluate_groups.yml - -- name: initialize the facts - import_playbook: ../../init/facts.yml - -- name: initialize the repositories - import_playbook: ../../init/repos.yml + import_playbook: ../../prerequisites.yml + vars: + skip_version: True + skip_sanity_checks: True + skip_validate_hostnames: True - name: run node config setup import_playbook: setup.yml diff --git a/playbooks/openshift-node/private/network_manager.yml b/playbooks/openshift-node/private/network_manager.yml index 7211787be..39640345f 100644 --- a/playbooks/openshift-node/private/network_manager.yml +++ b/playbooks/openshift-node/private/network_manager.yml @@ -1,6 +1,4 @@ --- -- import_playbook: ../../init/evaluate_groups.yml - - name: Install and configure NetworkManager hosts: oo_all_hosts become: yes diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml index c3beb59b7..41eb00f99 100644 --- a/playbooks/openshift-node/private/restart.yml +++ b/playbooks/openshift-node/private/restart.yml @@ -16,10 +16,6 @@ retries: 3 delay: 30 - - name: Update docker facts - openshift_facts: - role: docker - - name: Restart containerized services service: name: "{{ item }}" diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index c762169eb..f567242cd 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -24,7 +24,7 @@ The OpenStack release must be Newton (for Red Hat OpenStack this is version 10) or newer. It must also satisfy these requirements: * Heat (Orchestration) must be available -* The deployment image (CentOS 7 or RHEL 7) must be loaded +* The deployment image (CentOS 7.4 or RHEL 7) must be loaded * The deployment flavor must be available to your user - `m1.medium` / 4GB RAM + 40GB disk should be enough for testing - look at @@ -183,9 +183,14 @@ Then run the provision + install playbook -- this will create the OpenStack resources: ```bash -$ ansible-playbook --user openshift -i inventory openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml +$ ansible-playbook --user openshift -i inventory \ + openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \ + -e openshift_repos_enable_testing=true ``` +Note: the testing repo is intended for development purposes only. +Normally, `openshift_repos_enable_testing` should not be specified. + If you're using multiple inventories, make sure you pass the path to the right one to `-i`. 
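The testing-repo switch does not have to be passed on the command line; the sample inventory touched later in this patch exposes it as a commented variable. A minimal sketch of the inventory equivalent (values are illustrative, not defaults):

```yaml
# inventory/group_vars/OSEv3.yml -- equivalent of passing
# -e openshift_repos_enable_testing=true on the command line
openshift_deployment_type: origin
openshift_repos_enable_testing: true   # development and testing only
```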
@@ -210,7 +215,6 @@ advanced configuration: * [External Dns][external-dns] * Multiple Clusters (TODO) * [Cinder Registry][cinder-registry] -* [Bastion Node][bastion] [ansible]: https://www.ansible.com/ @@ -229,4 +233,3 @@ advanced configuration: [loadbalancer]: ./advanced-configuration.md#multi-master-configuration [external-dns]: ./advanced-configuration.md#dns-configuration-variables [cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry -[bastion]: ./advanced-configuration.md#configure-static-inventory-and-access-via-a-bastion-node diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md index c0bdf5020..db2a13d38 100644 --- a/playbooks/openstack/advanced-configuration.md +++ b/playbooks/openstack/advanced-configuration.md @@ -23,35 +23,14 @@ There are no additional dependencies for the cluster nodes. Required configuration steps are done by Heat given a specific user data config that normally should not be changed. -## Required galaxy modules - -In order to pull in external dependencies for DNS configuration steps, -the following commads need to be executed: - - ansible-galaxy install \ - -r openshift-ansible-contrib/playbooks/provisioning/openstack/galaxy-requirements.yaml \ - -p openshift-ansible-contrib/roles - -Alternatively you can install directly from github: - - ansible-galaxy install git+https://github.com/redhat-cop/infra-ansible,master \ - -p openshift-ansible-contrib/roles - -Notes: -* This assumes we're in the directory that contains the clonned -openshift-ansible-contrib repo in its root path. -* When trying to install a different version, the previous one must be removed first -(`infra-ansible` directory from [roles](https://github.com/openshift/openshift-ansible-contrib/tree/master/roles)). -Otherwise, even if there are differences between the two versions, installation of the newer version is skipped. - - ## Accessing the OpenShift Cluster ### Configure DNS -OpenShift requires two DNS records to function fully. The first one points to +OpenShift requires two public DNS records to function fully. The first one points to the master/load balancer and provides the UI/API access. The other one is a -wildcard domain that resolves app route requests to the infra node. +wildcard domain that resolves app route requests to the infra node. A private DNS +server and records are not required and not managed here. If you followed the default installation from the README section, there is no DNS configured. You should add two entries to the `/etc/hosts` file on the @@ -187,8 +166,8 @@ That sudomain can be set as well by the `openshift_openstack_app_subdomain` vari the inventory. The `openstack_<role name>_hostname` is a set of variables used for customising -hostnames of servers with a given role. When such a variable stays commented, -default hostname (usually the role name) is used. +public names of Nova servers provisioned with a given role. When such a variable is left commented out, +the default value (usually the role name) is used. The `openshift_openstack_dns_nameservers` is a list of DNS servers accessible from all the created Nova servers. These will provide the internal name resolution for @@ -203,7 +182,7 @@ When Network Manager is enabled for provisioned cluster nodes, which is normally the case, you should not change the defaults and always deploy dnsmasq. 
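The DNS pieces described above map onto a handful of inventory variables. A rough sketch with placeholder values (only the variable names come from the documentation above; the values themselves are illustrative):

```yaml
# inventory/group_vars/all.yml (illustrative values, not defaults)
openshift_openstack_clusterid: openshift
openshift_openstack_public_dns_domain: example.com
# Subdomain the wildcard application record is created under:
openshift_openstack_app_subdomain: apps
# Name servers reachable from all created Nova servers; internal name
# resolution on the nodes is still handled by dnsmasq:
openshift_openstack_dns_nameservers:
  - 203.0.113.53
```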
`openshift_openstack_external_nsupdate_keys` describes an external authoritative DNS server(s) -processing dynamic records updates in the public and private cluster views: +processing dynamic records updates in the public only cluster view: openshift_openstack_external_nsupdate_keys: public: @@ -211,10 +190,6 @@ processing dynamic records updates in the public and private cluster views: key_algorithm: 'hmac-md5' key_name: 'update-key' server: <public DNS server IP> - private: - key_secret: <some nsupdate key 2> - key_algorithm: 'hmac-sha256' - server: <public or private DNS server IP> Here, for the public view section, we specified another key algorithm and optional `key_name`, which normally defaults to the cluster's DNS domain. @@ -222,24 +197,6 @@ This just illustrates a compatibility mode with a DNS service deployed by OpenShift on OSP10 reference architecture, and used in a mixed mode with another external DNS server. -Another example defines an external DNS server for the public view -additionally to the in-stack DNS server used for the private view only: - - openshift_openstack_external_nsupdate_keys: - public: - key_secret: <some nsupdate key> - key_algorithm: 'hmac-sha256' - server: <public DNS server IP> - -Here, updates matching the public view will be hitting the given public -server IP. While updates matching the private view will be sent to the -auto evaluated in-stack DNS server's **public** IP. - -Note, for the in-stack DNS server, private view updates may be sent only -via the public IP of the server. You can not send updates via the private -IP yet. This forces the in-stack private server to have a floating IP. -See also the [security notes](#security-notes) - ## Flannel networking In order to configure the @@ -328,14 +285,6 @@ The `openshift_openstack_required_packages` variable also provides a list of the prerequisite packages to be installed before to deploy an OpenShift cluster. Those are ignored though, if the `manage_packages: False`. -The `openstack_inventory` controls either a static inventory will be created after the -cluster nodes provisioned on OpenStack cloud. Note, the fully dynamic inventory -is yet to be supported, so the static inventory will be created anyway. - -The `openstack_inventory_path` points the directory to host the generated static inventory. -It should point to the copied example inventory directory, otherwise ti creates -a new one for you. - ## Multi-master configuration Please refer to the official documentation for the @@ -345,7 +294,6 @@ variables](https://docs.openshift.com/container-platform/3.6/install_config/inst in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node under the ansible group named `ext_lb`: - openshift_master_cluster_method: native openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}" openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}" @@ -384,18 +332,6 @@ be the case for development environments. When turned off, the servers will be provisioned omitting the ``yum update`` command. This brings security implications though, and is not recommended for production deployments. -### DNS servers security options - -Aside from `openshift_openstack_node_ingress_cidr` restricting public access to in-stack DNS -servers, there are following (bind/named specific) DNS security -options available: - - named_public_recursion: 'no' - named_private_recursion: 'yes' - -External DNS servers, which is not included in the 'dns' hosts group, -are not managed. It is up to you to configure such ones. 
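The multi-master hunk above drops `openshift_master_cluster_method: native`; with the pacemaker path removed elsewhere in this patch, native HA is assumed and only the cluster hostnames need to be set. A minimal sketch for an external load balancer (the `ext_lb` group name follows the documentation above):

```yaml
# inventory/group_vars/OSEv3.yml -- multi-master behind an external load balancer
# (sketch; assumes the balancer is the first host of an "ext_lb" inventory group;
# openshift_master_cluster_method is intentionally omitted since native HA is assumed)
openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"
```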
- ## Configure the OpenShift parameters Finally, you need to update the DNS entry in @@ -538,43 +474,6 @@ You can also run the registry setup playbook directly: -## Configure static inventory and access via a bastion node - -Example inventory variables: - - openshift_openstack_use_bastion: true - openshift_openstack_bastion_ingress_cidr: "{{openshift_openstack_subnet_prefix}}.0/24" - openstack_private_ssh_key: ~/.ssh/id_rsa - openstack_inventory: static - openstack_inventory_path: ../../../../inventory - openstack_ssh_config_path: /tmp/ssh.config.openshift.ansible.openshift.example.com - -The `openshift_openstack_subnet_prefix` is the openstack private network for your cluster. -And the `openshift_openstack_bastion_ingress_cidr` defines accepted range for SSH connections to nodes -additionally to the `openshift_openstack_ssh_ingress_cidr`` (see the security notes above). - -The SSH config will be stored on the ansible control node by the -gitven path. Ansible uses it automatically. To access the cluster nodes with -that ssh config, use the `-F` prefix, f.e.: - - ssh -F /tmp/ssh.config.openshift.ansible.openshift.example.com master-0.openshift.example.com echo OK - -Note, relative paths will not work for the `openstack_ssh_config_path`, but it -works for the `openstack_private_ssh_key` and `openstack_inventory_path`. In this -guide, the latter points to the current directory, where you run ansible commands -from. - -To verify nodes connectivity, use the command: - - ansible -v -i inventory/hosts -m ping all - -If something is broken, double-check the inventory variables, paths and the -generated `<openstack_inventory_path>/hosts` and `openstack_ssh_config_path` files. - -The `inventory: dynamic` can be used instead to access cluster nodes directly via -floating IPs. In this mode you can not use a bastion node and should specify -the dynamic inventory file in your ansible commands , like `-i openstack.py`. - ## Using Docker on the Ansible host If you don't want to worry about the dependencies, you can use the @@ -604,28 +503,6 @@ the playbooks: ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml -### Run the playbook - -Assuming your OpenStack (Keystone) credentials are in the `keystonerc` -this is how you stat the provisioning process from your ansible control node: - - . keystonerc - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/provision.yaml - -Note, here you start with an empty inventory. The static inventory will be populated -with data so you can omit providing additional arguments for future ansible commands. - -If bastion enabled, the generates SSH config must be applied for ansible. -Otherwise, it is auto included by the previous step. In order to execute it -as a separate playbook, use the following command: - - ansible-playbook openshift-ansible-contrib/playbooks/provisioning/openstack/post-provision-openstack.yml - -The first infra node then becomes a bastion node as well and proxies access -for future ansible commands. The post-provision step also configures Satellite, -if requested, and DNS server, and ensures other OpenShift requirements to be met. - - ## Running Custom Post-Provision Actions A custom playbook can be run like this: @@ -733,21 +610,6 @@ Once it succeeds, you can install openshift by running: OpenShift UI may be accessed via the 1st master node FQDN, port 8443. 
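For the custom post-provision actions mentioned above, the playbook can be as small as a single play against the provisioned hosts. A minimal sketch (the `cluster_hosts` group name and the task are illustrative assumptions, not part of the provisioning code):

```yaml
---
# custom-post-provision.yml -- illustrative sketch only
- hosts: cluster_hosts
  become: yes
  tasks:
  - name: Confirm the post-provision hook reached every node
    debug:
      msg: "Post-provision step ran on {{ inventory_hostname }}"
```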
-When using a bastion, you may want to make an SSH tunnel from your control node -to access UI on the `https://localhost:8443`, with this inventory variable: - - openshift_openstack_ui_ssh_tunnel: True - -Note, this requires sudo rights on the ansible control node and an absolute path -for the `openstack_private_ssh_key`. You should also update the control node's -`/etc/hosts`: - - 127.0.0.1 master-0.openshift.example.com - -In order to access UI, the ssh-tunnel service will be created and started on the -control node. Make sure to remove these changes and the service manually, when not -needed anymore. - ## Scale Deployment up/down ### Scaling up @@ -766,5 +628,3 @@ Usage: ``` ansible-playbook -i <path to inventory> openshift-ansible-contrib/playbooks/provisioning/openstack/scale-up.yaml` [-e increment_by=<number>] [-e openshift_ansible_dir=<path to openshift-ansible>] ``` - -Note: This playbook works only without a bastion node (`openshift_openstack_use_bastion: False`). diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml index 1c4f609e3..8ed01b192 100644 --- a/playbooks/openstack/openshift-cluster/install.yml +++ b/playbooks/openstack/openshift-cluster/install.yml @@ -8,8 +8,5 @@ # values here. We do it in the OSEv3 group vars. Do we need to add # some logic here? -- name: run the initialization - include: ../../init/main.yml - -- name: run the config - include: ../../common/openshift-cluster/config.yml +- name: run the cluster deploy + include: ../../deploy_cluster.yml diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml index 36d8c8215..3e295b2c8 100644 --- a/playbooks/openstack/openshift-cluster/provision.yml +++ b/playbooks/openstack/openshift-cluster/provision.yml @@ -26,9 +26,9 @@ - name: Gather facts for the new nodes setup: +- name: set common facts + include: ../../init/facts.yml -# NOTE(shadower): the (internal) DNS must be functional at this point!! -# That will have happened in provision.yml if nsupdate was configured. # TODO(shadower): consider splitting this up so people can stop here # and configure their DNS if they have to. 
@@ -47,6 +47,13 @@ hosts: oo_all_hosts become: yes gather_facts: yes + roles: + - role: rhel_subscribe + when: + - ansible_distribution == "RedHat" + - rhsub_user | default(False) + - rhsub_pass | default(False) + tasks: - name: Install dependencies include_role: diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml index 90608bbc0..933117127 100644 --- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -1,10 +1,11 @@ --- +## Openshift product versions and repos to install from openshift_deployment_type: origin +#openshift_repos_enable_testing: true #openshift_deployment_type: openshift-enterprise #openshift_release: v3.5 openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" -openshift_master_cluster_method: native openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" osm_default_node_selector: 'region=primary' diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml index ae1528123..c7afe9a24 100644 --- a/playbooks/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/openstack/sample-inventory/group_vars/all.yml @@ -82,27 +82,10 @@ openshift_openstack_docker_volume_size: "15" openshift_openstack_subnet_prefix: "192.168.99" -## Red Hat subscription defaults to false which means we will not attempt to -## subscribe the nodes -#rhsm_register: False - -# # Using Red Hat Satellite: -#rhsm_register: True -#rhsm_satellite: 'sat-6.example.com' -#rhsm_org: 'OPENSHIFT_ORG' -#rhsm_activationkey: '<activation-key>' - -# # Or using RHN username, password and optionally pool: -#rhsm_register: True -#rhsm_username: '<username>' -#rhsm_password: '<password>' -#rhsm_pool: '<pool id>' - -#rhsm_repos: -# - "rhel-7-server-rpms" -# - "rhel-7-server-ose-3.5-rpms" -# - "rhel-7-server-extras-rpms" -# - "rhel-7-fast-datapath-rpms" +## Red Hat subscription: +#rhsub_user: '<username>' +#rhsub_pass: '<password>' +#rhsub_pool: '<pool name>' # # Roll-your-own DNS diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml index 582dfe794..0cc5fcef8 100644 --- a/playbooks/prerequisites.yml +++ b/playbooks/prerequisites.yml @@ -1,7 +1,6 @@ --- -- name: Place holder for prerequisites - hosts: localhost - gather_facts: false - tasks: - - name: Debug placeholder - debug: msg="Prerequisites ran." +- import_playbook: init/main.yml + vars: + skip_verison: True + +- import_playbook: container-runtime/private/config.yml diff --git a/playbooks/roles b/playbooks/roles new file mode 120000 index 000000000..d8c4472ca --- /dev/null +++ b/playbooks/roles @@ -0,0 +1 @@ +../roles
\ No newline at end of file diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml index 67fc0065f..9cc0604a3 100644 --- a/roles/calico/handlers/main.yml +++ b/roles/calico/handlers/main.yml @@ -3,10 +3,10 @@ become: yes systemd: name=calico state=restarted -- name: restart docker +- name: restart container runtime become: yes systemd: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: restarted register: l_docker_restart_docker_in_calico_result until: not l_docker_restart_docker_in_calico_result | failed diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2 index 7653e19b1..a7809b9f9 100644 --- a/roles/calico/templates/calico.service.j2 +++ b/roles/calico/templates/calico.service.j2 @@ -1,7 +1,7 @@ [Unit] Description=calico -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service [Service] Restart=always diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md new file mode 100644 index 000000000..51f469aaf --- /dev/null +++ b/roles/container_runtime/README.md @@ -0,0 +1,44 @@ +Container Runtime +========= + +Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes. + +container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + +This role is designed to be used with include_role and tasks_from. + +Entry points +------------ +* package_docker.yml - install and setup docker container runtime. +* systemcontainer_docker.yml - utilize docker + systemcontainer +* systemcontainer_crio.yml - utilize crio + systemcontainer +* registry_auth.yml - place docker login credentials. + +Requirements +------------ + +Ansible 2.4 + + +Dependencies +------------ + +Depends on openshift_facts having already been run. + +Example Playbook +---------------- + + - hosts: servers + tasks: + - include_role: container_runtime + tasks_from: package_docker.yml + +License +------- + +ASL 2.0 + +Author Information +------------------ + +Red Hat, Inc diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml new file mode 100644 index 000000000..bd96965ac --- /dev/null +++ b/roles/container_runtime/defaults/main.yml @@ -0,0 +1,129 @@ +--- +docker_cli_auth_config_path: '/root/.docker' +openshift_docker_signature_verification: False + +repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" + +openshift_docker_alternative_creds: False + +# oreg_url is defined by user input. +oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" +oreg_auth_credentials_replace: False + +openshift_docker_use_system_container: False +openshift_docker_disable_push_dockerhub: False # bool +openshift_docker_selinux_enabled: True +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" + +openshift_docker_hosted_registry_insecure: False # bool + +openshift_docker_hosted_registry_network_default: "{{ openshift_portal_net | default(False) }}" +openshift_docker_hosted_registry_network: "{{ openshift_docker_hosted_registry_network_default }}" + +openshift_docker_additional_registries: [] +openshift_docker_blocked_registries: [] +openshift_docker_insecure_registries: [] + +openshift_docker_ent_reg: 'registry.access.redhat.com' + +openshift_docker_options: False # str +openshift_docker_log_driver: False # str +openshift_docker_log_options: [] + +# The l2_docker_* variables convert csv strings to lists, if +# necessary. These variables should be used in place of their respective +# openshift_docker_* counterparts to ensure the properly formatted lists are +# utilized. +l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}" +l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}" +l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}" +l2_docker_log_options: "{% if openshift_docker_log_options is string %}{% if ',' in openshift_docker_log_options %}{{ openshift_docker_log_options.split(',') | list }}{% else %}{{ [ openshift_docker_log_options ] }}{% endif %}{% else %}{{ openshift_docker_log_options }}{% endif %}" + +openshift_docker_use_etc_containers: False +containers_registries_conf_path: /etc/containers/registries.conf + +r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" + +r_crio_os_firewall_deny: [] +r_crio_os_firewall_allow: +- service: crio + port: 10010/tcp + + +openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}" + +docker_alt_storage_path: /var/lib/containers/docker +docker_default_storage_path: /var/lib/docker + +# Set local versions of facts that must be in json format for container-daemon.json +# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson +l_docker_log_options: "{{ l2_docker_log_options | to_json }}" +l_docker_additional_registries: "{{ 
l2_docker_additional_registries | to_json }}" +l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}" +l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}" +l_docker_selinux_enabled: "{{ openshift_docker_selinux_enabled | to_json }}" + +docker_http_proxy: "{{ openshift_http_proxy | default('') }}" +docker_https_proxy: "{{ openshift.common.https_proxy | default('') }}" +docker_no_proxy: "{{ openshift.common.no_proxy | default('') }}" + +openshift_use_crio: False +openshift_use_crio_only: False + +l_openshift_image_tag_default: "{{ openshift_release | default('latest') }}" +l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}" + +# --------------------- # +# systemcontainers_crio # +# --------------------- # +l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" +l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" +l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" + +openshift_crio_image_tag_default: "latest" + +l_crt_crio_image_tag_dict: + openshift-enterprise: "{{ l_openshift_image_tag }}" + origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}" + +l_crt_crio_image_prepend_dict: + openshift-enterprise: "registry.access.redhat.com/openshift3" + origin: "docker.io/gscrivano" + +l_crt_crio_image_dict: + Fedora: + crio_image_name: "cri-o-fedora" + crio_image_tag: "latest" + CentOS: + crio_image_name: "cri-o-centos" + crio_image_tag: "latest" + RedHat: + crio_image_name: "cri-o" + crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}" + +l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}" +l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}" +l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution] }}" + +l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}" +l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}" + +# ----------------------- # +# systemcontainers_docker # +# ----------------------- # +l_crt_docker_image_prepend_dict: + Fedora: "registry.fedoraproject.org/f25" + Centos: "docker.io/gscrivano" + RedHat: "registry.access.redhat.com/openshift3" + +openshift_docker_image_tag_default: "latest" +l_crt_docker_image_tag_dict: + openshift-enterprise: "{{ l_openshift_image_tag }}" + origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}" + +l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}" +l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}" + +l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}" +l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}" diff --git a/roles/docker/handlers/main.yml b/roles/container_runtime/handlers/main.yml index 866ed0452..67cd6d782 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/container_runtime/handlers/main.yml @@ -1,8 +1,8 @@ --- -- name: restart docker +- name: restart container runtime systemd: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: restarted daemon_reload: yes register: r_docker_restart_docker_result diff --git 
a/roles/docker/meta/main.yml b/roles/container_runtime/meta/main.yml index d5faae8df..02fceb745 100644 --- a/roles/docker/meta/main.yml +++ b/roles/container_runtime/meta/main.yml @@ -1,7 +1,7 @@ --- galaxy_info: author: OpenShift - description: docker package install + description: container runtime install and configure company: Red Hat, Inc license: ASL 2.0 min_ansible_version: 2.2 diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/container_runtime/tasks/common/atomic_proxy.yml index dde099984..dde099984 100644 --- a/roles/openshift_atomic/tasks/proxy.yml +++ b/roles/container_runtime/tasks/common/atomic_proxy.yml diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml new file mode 100644 index 000000000..d790eb2c0 --- /dev/null +++ b/roles/container_runtime/tasks/common/post.yml @@ -0,0 +1,26 @@ +--- +- name: Ensure /var/lib/containers exists + file: + path: /var/lib/containers + state: directory + +- name: Fix SELinux Permissions on /var/lib/containers + command: "restorecon -R /var/lib/containers/" + changed_when: false + +- meta: flush_handlers + +# This needs to run after docker is restarted to account for proxy settings. +# registry_auth is called directly with include_role in some places, so we +# have to put it in the root of the tasks/ directory. +- include_tasks: ../registry_auth.yml + +- name: stat the docker data dir + stat: + path: "{{ docker_default_storage_path }}" + register: dockerstat + +- include_tasks: setup_docker_symlink.yml + when: + - openshift_use_crio + - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool) diff --git a/roles/container_runtime/tasks/common/pre.yml b/roles/container_runtime/tasks/common/pre.yml new file mode 100644 index 000000000..990fe66da --- /dev/null +++ b/roles/container_runtime/tasks/common/pre.yml @@ -0,0 +1,12 @@ +--- +- include_tasks: udev_workaround.yml + when: docker_udev_workaround | default(False) | bool + +- name: Add enterprise registry, if necessary + set_fact: + l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}" + when: + - openshift.common.deployment_type == 'openshift-enterprise' + - openshift_docker_ent_reg != '' + - openshift_docker_ent_reg not in l2_docker_additional_registries + - not openshift_use_crio_only | bool diff --git a/roles/container_runtime/tasks/common/setup_docker_symlink.yml b/roles/container_runtime/tasks/common/setup_docker_symlink.yml new file mode 100644 index 000000000..d7aeb192e --- /dev/null +++ b/roles/container_runtime/tasks/common/setup_docker_symlink.yml @@ -0,0 +1,38 @@ +--- +- block: + - name: stop the current running docker + systemd: + state: stopped + name: "{{ openshift_docker_service_name }}" + + - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}" + command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" + register: results + failed_when: + - results.rc != 0 + + - name: "Set the selinux context on {{ docker_alt_storage_path }}" + command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" + register: results + failed_when: + - results.rc == 1 + - "'already exists' not in results.stderr" + + - name: "restorecon the {{ docker_alt_storage_path }}" + command: "restorecon -r {{ docker_alt_storage_path }}" + + - name: Remove the old docker location + file: + state: absent + path: "{{ docker_default_storage_path }}" + + - name: Setup the link + file: + state: link + src: 
"{{ docker_alt_storage_path }}" + path: "{{ docker_default_storage_path }}" + + - name: start docker + systemd: + state: started + name: "{{ openshift_docker_service_name }}" diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml new file mode 100644 index 000000000..715ed492d --- /dev/null +++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml @@ -0,0 +1,28 @@ +--- + +- name: Ensure container-selinux is installed + package: + name: container-selinux + state: present + when: not openshift.common.is_atomic | bool + register: result + until: result | success + +# Used to pull and install the system container +- name: Ensure atomic is installed + package: + name: atomic + state: present + when: not openshift.common.is_atomic | bool + register: result + until: result | success + +# At the time of writing the atomic command requires runc for it's own use. This +# task is here in the even that the atomic package ever removes the dependency. +- name: Ensure runc is installed + package: + name: runc + state: present + when: not openshift.common.is_atomic | bool + register: result + until: result | success diff --git a/roles/docker/tasks/udev_workaround.yml b/roles/container_runtime/tasks/common/udev_workaround.yml index 257c3123d..257c3123d 100644 --- a/roles/docker/tasks/udev_workaround.yml +++ b/roles/container_runtime/tasks/common/udev_workaround.yml diff --git a/roles/docker/tasks/crio_firewall.yml b/roles/container_runtime/tasks/crio_firewall.yml index fbd1ff515..fbd1ff515 100644 --- a/roles/docker/tasks/crio_firewall.yml +++ b/roles/container_runtime/tasks/crio_firewall.yml diff --git a/roles/container_runtime/tasks/docker_sanity.yml b/roles/container_runtime/tasks/docker_sanity.yml new file mode 100644 index 000000000..e62cf5505 --- /dev/null +++ b/roles/container_runtime/tasks/docker_sanity.yml @@ -0,0 +1,27 @@ +--- +# Sanity checks to ensure the role will complete and provide helpful error +# messages for common problems. + +- name: Error out if Docker pre-installed but too old + fail: + msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined + +- name: Error out if requested Docker is too old + fail: + msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required." + when: docker_version is defined and docker_version | version_compare('1.9.1', '<') + +# If a docker_version was requested, sanity check that we can install or upgrade to it, and +# no downgrade is required. +- name: Fail if Docker version requested but downgrade is required + fail: + msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') + +# This involves an extremely slow migration process, users should instead run the +# Docker 1.10 upgrade playbook to accomplish this. +- name: Error out if attempting to upgrade Docker across the 1.10 boundary + fail: + msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed." 
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml new file mode 100644 index 000000000..96d8606c6 --- /dev/null +++ b/roles/container_runtime/tasks/main.yml @@ -0,0 +1,2 @@ +--- +# This role is meant to be used with include_role and tasks_from. diff --git a/roles/docker/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml index 044b04478..89899c9cf 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/container_runtime/tasks/package_docker.yml @@ -1,4 +1,6 @@ --- +- include_tasks: common/pre.yml + - name: Get current installed Docker version command: "{{ repoquery_installed }} --qf '%{version}' docker" when: not openshift.common.is_atomic | bool @@ -7,35 +9,16 @@ until: curr_docker_version | succeeded changed_when: false -- name: Error out if Docker pre-installed but too old - fail: - msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined - -- name: Error out if requested Docker is too old - fail: - msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required." - when: docker_version is defined and docker_version | version_compare('1.9.1', '<') - -# If a docker_version was requested, sanity check that we can install or upgrade to it, and -# no downgrade is required. -- name: Fail if Docker version requested but downgrade is required - fail: - msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') - -# This involves an extremely slow migration process, users should instead run the -# Docker 1.10 upgrade playbook to accomplish this. -- name: Error out if attempting to upgrade Docker across the 1.10 boundary - fail: - msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') +# Some basic checks to ensure the role will complete +- include_tasks: docker_sanity.yml # Make sure Docker is installed, but does not update a running version. # Docker upgrades are handled by a separate playbook. # Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed. 
- name: Install Docker - package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present + package: + name: "docker{{ '-' + docker_version if docker_version is defined else '' }}" + state: present when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != '' register: result until: result | success @@ -52,7 +35,7 @@ dest: "{{ docker_systemd_dir }}/custom.conf" src: custom.conf.j2 notify: - - restart docker + - restart container runtime when: not (os_firewall_use_firewalld | default(False)) | bool - stat: path=/etc/sysconfig/docker @@ -78,7 +61,7 @@ reg_fact_val: "{{ l2_docker_insecure_registries }}" reg_flag: --insecure-registry notify: - - restart docker + - restart container runtime - name: Place additional/blocked/insecure registries in /etc/containers/registries.conf template: @@ -86,7 +69,7 @@ src: registries.conf when: openshift_docker_use_etc_containers | bool notify: - - restart docker + - restart container runtime - name: Set Proxy Settings lineinfile: @@ -96,30 +79,34 @@ state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}" with_items: - reg_conf_var: HTTP_PROXY - reg_fact_val: "{{ docker_http_proxy | default('') }}" + reg_fact_val: "{{ docker_http_proxy }}" - reg_conf_var: HTTPS_PROXY - reg_fact_val: "{{ docker_https_proxy | default('') }}" + reg_fact_val: "{{ docker_https_proxy }}" - reg_conf_var: NO_PROXY - reg_fact_val: "{{ docker_no_proxy | default('') }}" + reg_fact_val: "{{ docker_no_proxy }}" notify: - - restart docker + - restart container runtime when: - - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common' + - docker_check.stat.isreg is defined + - docker_check.stat.isreg + - docker_http_proxy != '' or docker_https_proxy != '' - name: Set various Docker options lineinfile: dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*$' line: "OPTIONS='\ - {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \ - {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %} \ - {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \ + {% if ansible_selinux.status | default(None) == 'enabled' and openshift_docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \ + {% if openshift_docker_log_driver | bool %} --log-driver {{ openshift_docker_log_driver }}{% endif %} \ + {% if l2_docker_log_options != [] %} {{ l2_docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \ + {% if openshift_docker_hosted_registry_insecure and (openshift_docker_hosted_registry_network | bool) %} --insecure-registry={{ openshift_docker_hosted_registry_network }} {% endif %} \ {% if docker_options is defined %} {{ docker_options }}{% endif %} \ - {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %} \ + {% if openshift_docker_options %} {{ openshift_docker_options }}{% endif %} \ + {% if openshift_docker_disable_push_dockerhub %} --confirm-def-push={{ openshift_docker_disable_push_dockerhub | bool }}{% endif %} \ --signature-verification={{ openshift_docker_signature_verification | bool }}'" when: docker_check.stat.isreg is defined and docker_check.stat.isreg notify: - - restart docker + - 
restart container runtime - stat: path=/etc/sysconfig/docker-network register: sysconfig_docker_network_check @@ -134,7 +121,7 @@ - sysconfig_docker_network_check.stat.isreg is defined - sysconfig_docker_network_check.stat.isreg notify: - - restart docker + - restart container runtime # The following task is needed as the systemd module may report a change in # state even though docker is already running. @@ -157,7 +144,4 @@ - set_fact: docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}" -- meta: flush_handlers - -# This needs to run after docker is restarted to account for proxy settings. -- include_tasks: registry_auth.yml +- include_tasks: common/post.yml diff --git a/roles/docker/tasks/registry_auth.yml b/roles/container_runtime/tasks/registry_auth.yml index 2c7bc5711..2c7bc5711 100644 --- a/roles/docker/tasks/registry_auth.yml +++ b/roles/container_runtime/tasks/registry_auth.yml diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml new file mode 100644 index 000000000..5ea7df650 --- /dev/null +++ b/roles/container_runtime/tasks/systemcontainer_crio.yml @@ -0,0 +1,96 @@ +--- +# TODO: Much of this file is shared with container engine tasks +- name: Check we are not using node as a Docker container with CRI-O + fail: msg='Cannot use CRI-O with node configured as a Docker container' + when: + - openshift.common.is_containerized | bool + - not openshift.common.is_node_system_container | bool + +- include_tasks: common/pre.yml + +- include_tasks: common/syscontainer_packages.yml + +- name: Check that overlay is in the kernel + shell: lsmod | grep overlay + register: l_has_overlay_in_kernel + ignore_errors: yes + failed_when: false + +- when: l_has_overlay_in_kernel.rc != 0 + block: + + - name: Add overlay to modprobe.d + template: + dest: /etc/modules-load.d/overlay.conf + src: overlay.conf.j2 + backup: yes + + - name: Manually modprobe overlay into the kernel + command: modprobe overlay + + - name: Enable and start systemd-modules-load + service: + name: systemd-modules-load + enabled: yes + state: restarted + +- name: Ensure proxies are in the atomic.conf + include_tasks: common/atomic_proxy.yml + +# Be nice and let the user see the variable result +- debug: + var: l_crio_image + +# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released +- name: Pre-pull CRI-O System Container image + command: "atomic pull --storage ostree {{ l_crio_image }}" + changed_when: false + environment: + NO_PROXY: "{{ openshift.common.no_proxy | default('') }}" + +- name: Install CRI-O System Container + oc_atomic_container: + name: "cri-o" + image: "{{ l_crio_image }}" + state: latest + +- name: Remove CRI-O default configuration files + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/cni/net.d/200-loopback.conf + - /etc/cni/net.d/100-crio-bridge.conf + +- name: Create the CRI-O configuration + template: + dest: /etc/crio/crio.conf + src: crio.conf.j2 + backup: yes + +- name: Ensure CNI configuration directory exists + file: + path: /etc/cni/net.d/ + state: directory + +- name: setup firewall for CRI-O + import_tasks: crio_firewall.yml + +- name: Configure the CNI network + template: + dest: /etc/cni/net.d/openshift-sdn.conf + src: 80-openshift-sdn.conf.j2 + +- name: Start the CRI-O service + systemd: + name: "cri-o" + enabled: yes + state: started + daemon_reload: yes + 
register: start_result + +# If we are using crio only, docker.service might not be available for +# 'docker login' +- include_tasks: common/post.yml + vars: + openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}" diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml new file mode 100644 index 000000000..10570fe34 --- /dev/null +++ b/roles/container_runtime/tasks/systemcontainer_docker.yml @@ -0,0 +1,101 @@ +--- +# If docker_options are provided we should fail. We should not install docker and ignore +# the user's configuration. NOTE: docker_options == inventory:openshift_docker_options +- name: Fail quickly if openshift_docker_options are set + assert: + that: + - "{% if not openshift_docker_options %}1{% else %}0{% endif %}" + msg: | + Docker via System Container does not allow for the use of the openshift_docker_options + variable. If you want to use openshift_docker_options you will need to use the + traditional docker package install. Otherwise, comment out openshift_docker_options + in your inventory file. + +- include_tasks: common/pre.yml + +- include_tasks: common/syscontainer_packages.yml + +# Make sure Docker is installed so we are able to use the client +- name: Install Docker so we can use the client + package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present + when: not openshift.common.is_atomic | bool + register: result + until: result | success + +# Make sure docker is disabled. Errors are ignored. +- name: Disable Docker + systemd: + name: docker + enabled: no + state: stopped + daemon_reload: yes + ignore_errors: True + register: r_docker_systemcontainer_docker_stop_result + until: not r_docker_systemcontainer_docker_stop_result | failed + retries: 3 + delay: 30 + +- name: Ensure proxies are in the atomic.conf + include_tasks: common/atomic_proxy.yml + +# Be nice and let the user see the variable result +- debug: + var: l_docker_image + +# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released +- name: Pre-pull Container Engine System Container image + command: "atomic pull --storage ostree {{ l_docker_image }}" + changed_when: false + environment: + NO_PROXY: "{{ docker_no_proxy }}" + + +- name: Ensure container-engine.service.d directory exists + file: + path: "{{ container_engine_systemd_dir }}" + state: directory + +- name: Ensure /etc/docker directory exists + file: + path: "{{ docker_conf_dir }}" + state: directory + +- name: Install Container Engine System Container + oc_atomic_container: + name: "{{ openshift_docker_service_name }}" + image: "{{ l_docker_image }}" + state: latest + +- name: Configure Container Engine Service File + template: + dest: "{{ container_engine_systemd_dir }}/custom.conf" + src: systemcontainercustom.conf.j2 + +# Configure container-engine using the container-daemon.json file +# NOTE: daemon.json and container-daemon.json have been separated to avoid +# collision. 
+- name: Configure Container Engine + template: + dest: "{{ docker_conf_dir }}/container-daemon.json" + src: daemon.json + +# Enable and start the container-engine service +- name: Start the Container Engine service + systemd: + name: "{{ openshift_docker_service_name }}" + enabled: yes + state: started + daemon_reload: yes + register: r_docker_systemcontainer_docker_start_result + until: not r_docker_systemcontainer_docker_start_result | failed + retries: 3 + delay: 30 + +- set_fact: + docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}" + +# Since docker is running as a system container, docker login will fail to create +# credentials. Use alternate method if requiring authenticated registries. +- include_tasks: common/post.yml + vars: + openshift_docker_alternative_creds: True diff --git a/roles/docker/templates/80-openshift-sdn.conf.j2 b/roles/container_runtime/templates/80-openshift-sdn.conf.j2 index a693aea5f..a693aea5f 100644 --- a/roles/docker/templates/80-openshift-sdn.conf.j2 +++ b/roles/container_runtime/templates/80-openshift-sdn.conf.j2 diff --git a/roles/docker/templates/crio.conf.j2 b/roles/container_runtime/templates/crio.conf.j2 index 3f066a17f..3f066a17f 100644 --- a/roles/docker/templates/crio.conf.j2 +++ b/roles/container_runtime/templates/crio.conf.j2 diff --git a/roles/docker/templates/custom.conf.j2 b/roles/container_runtime/templates/custom.conf.j2 index 713412473..713412473 100644 --- a/roles/docker/templates/custom.conf.j2 +++ b/roles/container_runtime/templates/custom.conf.j2 diff --git a/roles/docker/templates/daemon.json b/roles/container_runtime/templates/daemon.json index a41b7cdbd..383963bd3 100644 --- a/roles/docker/templates/daemon.json +++ b/roles/container_runtime/templates/daemon.json @@ -5,8 +5,8 @@ "disable-legacy-registry": false, "exec-opts": ["native.cgroupdriver=systemd"], "insecure-registries": {{ l_docker_insecure_registries }}, -{% if docker_log_driver is defined %} - "log-driver": "{{ docker_log_driver }}", +{% if openshift_docker_log_driver is defined %} + "log-driver": "{{ openshift_docker_log_driver }}", {%- endif %} "log-opts": {{ l_docker_log_options }}, "runtimes": { diff --git a/roles/docker/templates/overlay.conf.j2 b/roles/container_runtime/templates/overlay.conf.j2 index 782f46c2e..782f46c2e 100644 --- a/roles/docker/templates/overlay.conf.j2 +++ b/roles/container_runtime/templates/overlay.conf.j2 diff --git a/roles/docker/templates/registries.conf b/roles/container_runtime/templates/registries.conf index d379b2be0..d379b2be0 100644 --- a/roles/docker/templates/registries.conf +++ b/roles/container_runtime/templates/registries.conf diff --git a/roles/docker/templates/systemcontainercustom.conf.j2 b/roles/container_runtime/templates/systemcontainercustom.conf.j2 index 86eebfba6..86eebfba6 100644 --- a/roles/docker/templates/systemcontainercustom.conf.j2 +++ b/roles/container_runtime/templates/systemcontainercustom.conf.j2 diff --git a/roles/docker/vars/main.yml b/roles/container_runtime/vars/main.yml index 4e940b7f5..4e940b7f5 100644 --- a/roles/docker/vars/main.yml +++ b/roles/container_runtime/vars/main.yml diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml index b5d2f7c6e..aa976d921 100644 --- a/roles/contiv/defaults/main.yml +++ b/roles/contiv/defaults/main.yml @@ -119,3 +119,5 @@ contiv_h1_gw_default: "10.129.0.1" # contiv default private subnet for ext access contiv_private_ext_subnet: "10.130.0.0/16" + +openshift_docker_service_name: "{{ 'container-engine' if 
(openshift_docker_use_system_container | default(False)) else 'docker' }}" diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml index 0b2f91bab..cf92a8cc0 100644 --- a/roles/contiv/tasks/netplugin.yml +++ b/roles/contiv/tasks/netplugin.yml @@ -105,7 +105,7 @@ - name: Docker | Restart docker service: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: restarted when: docker_updated|changed register: l_docker_restart_docker_in_contiv_result diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service index 4506d2231..90bb98001 100644 --- a/roles/contiv/templates/aci-gw.service +++ b/roles/contiv/templates/aci-gw.service @@ -1,6 +1,6 @@ [Unit] Description=Contiv ACI gw -After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift.docker.service_name }}.service +After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift_docker_service_name }}.service [Service] ExecStart={{ bin_dir }}/aci_gw.sh start diff --git a/roles/docker/README.md b/roles/docker/README.md deleted file mode 100644 index 19908c036..000000000 --- a/roles/docker/README.md +++ /dev/null @@ -1,43 +0,0 @@ -Docker -========= - -Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes. - -container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file - -Requirements ------------- - -Ansible 2.2 - -Role Variables --------------- - -docker_conf_dir: location of the Docker configuration directory -docker_systemd_dir location of the systemd directory for Docker -docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446) -udevw_udevd_dir: location of systemd config for systemd-udevd.service - -Dependencies ------------- - -Depends on the os_firewall role. - -Example Playbook ----------------- - - - hosts: servers - roles: - - role: docker - docker_udev_workaround: "true" - docker_use_system_container: False - -License -------- - -ASL 2.0 - -Author Information ------------------- - -OpenShift operations, Red Hat, Inc diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml deleted file mode 100644 index 224844a06..000000000 --- a/roles/docker/defaults/main.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -docker_cli_auth_config_path: '/root/.docker' -openshift_docker_signature_verification: False - -openshift_docker_alternative_creds: False - -# oreg_url is defined by user input. -oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" -oreg_auth_credentials_replace: False - -openshift_docker_additional_registries: [] -openshift_docker_blocked_registries: [] -openshift_docker_insecure_registries: [] - -openshift_docker_ent_reg: 'registry.access.redhat.com' - -# The l2_docker_* variables convert csv strings to lists, if -# necessary. These variables should be used in place of their respective -# openshift_docker_* counterparts to ensure the properly formatted lists are -# utilized. 
-l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}" -l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}" -l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}" - -openshift_docker_use_etc_containers: False -containers_registries_conf_path: /etc/containers/registries.conf - -r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" -r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" - -r_crio_os_firewall_deny: [] -r_crio_os_firewall_allow: -- service: crio - port: 10010/tcp - - -openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}" - -docker_alt_storage_path: /var/lib/containers/docker -docker_default_storage_path: /var/lib/docker diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml deleted file mode 100644 index b02a74711..000000000 --- a/roles/docker/tasks/main.yml +++ /dev/null @@ -1,93 +0,0 @@ ---- -# These tasks dispatch to the proper set of docker tasks based on the -# inventory:openshift_docker_use_system_container variable - -- include_tasks: udev_workaround.yml - when: docker_udev_workaround | default(False) | bool - -- set_fact: - l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}" - l_use_crio: "{{ openshift_use_crio | default(False) }}" - l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}" - -- name: Add enterprise registry, if necessary - set_fact: - l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}" - when: - - openshift.common.deployment_type == 'openshift-enterprise' - - openshift_docker_ent_reg != '' - - openshift_docker_ent_reg not in l2_docker_additional_registries - - not l_use_crio_only - -- name: Use Package Docker if Requested - include_tasks: package_docker.yml - when: - - not l_use_system_container - - not l_use_crio_only - -- name: Ensure /var/lib/containers exists - file: - path: /var/lib/containers - state: directory - -- name: Fix SELinux Permissions on /var/lib/containers - command: "restorecon -R /var/lib/containers/" - changed_when: false - -- name: Use System Container Docker if Requested - include_tasks: systemcontainer_docker.yml - when: - - l_use_system_container - - not l_use_crio_only - -- name: Add CRI-O usage Requested - include_tasks: systemcontainer_crio.yml - when: - - l_use_crio - - openshift_docker_is_node_or_master | bool - -- name: stat the docker data dir - 
stat: - path: "{{ docker_default_storage_path }}" - register: dockerstat - -- when: - - l_use_crio - - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool) - block: - - name: stop the current running docker - systemd: - state: stopped - name: "{{ openshift.docker.service_name }}" - - - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}" - command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" - register: results - failed_when: - - results.rc != 0 - - - name: "Set the selinux context on {{ docker_alt_storage_path }}" - command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" - register: results - failed_when: - - results.rc == 1 - - "'already exists' not in results.stderr" - - - name: "restorecon the {{ docker_alt_storage_path }}" - command: "restorecon -r {{ docker_alt_storage_path }}" - - - name: Remove the old docker location - file: - state: absent - path: "{{ docker_default_storage_path }}" - - - name: Setup the link - file: - state: link - src: "{{ docker_alt_storage_path }}" - path: "{{ docker_default_storage_path }}" - - - name: start docker - systemd: - state: started - name: "{{ openshift.docker.service_name }}" diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml deleted file mode 100644 index 3439aa353..000000000 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ /dev/null @@ -1,187 +0,0 @@ ---- - -# TODO: Much of this file is shared with container engine tasks -- set_fact: - l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" -- set_fact: - l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" -- set_fact: - l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" - -- set_fact: - l_openshift_image_tag: "{{ openshift_image_tag | string }}" - when: openshift_image_tag is defined - -- set_fact: - l_openshift_image_tag: "latest" - when: - - openshift_image_tag is not defined - - openshift_release == "latest" - -- set_fact: - l_openshift_image_tag: "{{ openshift_release | string }}" - when: - - openshift_image_tag is not defined - - openshift_release != "latest" - -- name: Ensure container-selinux is installed - package: - name: container-selinux - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -- name: Check we are not using node as a Docker container with CRI-O - fail: msg='Cannot use CRI-O with node configured as a Docker container' - when: - - openshift.common.is_containerized | bool - - not openshift.common.is_node_system_container | bool - -# Used to pull and install the system container -- name: Ensure atomic is installed - package: - name: atomic - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -# At the time of writing the atomic command requires runc for it's own use. This -# task is here in the even that the atomic package ever removes the dependency. 
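The set_fact tasks at the top of this deleted file join the l2_docker_* lists into double-quoted, comma-separated strings for the crio.conf template. A short Python sketch of that formatting, with an illustrative input list:

```
def quote_registries(registries):
    """Render ['a', 'b'] as '"a", "b"', as the l_*_crio_registries facts do."""
    return '"{}"'.format('", "'.join(registries))


crio_registries = ['registry.access.redhat.com', 'docker.io']  # example values only
print(quote_registries(crio_registries))  # "registry.access.redhat.com", "docker.io"
```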
-- name: Ensure runc is installed - package: - name: runc - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - - -- name: Check that overlay is in the kernel - shell: lsmod | grep overlay - register: l_has_overlay_in_kernel - ignore_errors: yes - failed_when: false - -- when: l_has_overlay_in_kernel.rc != 0 - block: - - - name: Add overlay to modprobe.d - template: - dest: /etc/modules-load.d/overlay.conf - src: overlay.conf.j2 - backup: yes - - - name: Manually modprobe overlay into the kernel - command: modprobe overlay - - - name: Enable and start systemd-modules-load - service: - name: systemd-modules-load - enabled: yes - state: restarted - -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - -- block: - - - name: Set CRI-O image defaults - set_fact: - l_crio_image_prepend: "docker.io/gscrivano" - l_crio_image_name: "cri-o-fedora" - l_crio_image_tag: "latest" - - - name: Use Centos based image when distribution is CentOS - set_fact: - l_crio_image_name: "cri-o-centos" - when: ansible_distribution == "CentOS" - - - name: Set CRI-O image tag - set_fact: - l_crio_image_tag: "{{ l_openshift_image_tag }}" - when: - - openshift_deployment_type == 'openshift-enterprise' - - - name: Use RHEL based image when distribution is Red Hat - set_fact: - l_crio_image_prepend: "registry.access.redhat.com/openshift3" - l_crio_image_name: "cri-o" - when: ansible_distribution == "RedHat" - - - name: Set the full image name - set_fact: - l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}" - - # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548 - - name: Use a specific image if requested - set_fact: - l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}" - when: - - openshift_crio_systemcontainer_image_override is defined - - openshift_crio_systemcontainer_image_override != "" - - # Be nice and let the user see the variable result - - debug: - var: l_crio_image - -# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released -- name: Pre-pull CRI-O System Container image - command: "atomic pull --storage ostree {{ l_crio_image }}" - changed_when: false - environment: - NO_PROXY: "{{ openshift.common.no_proxy | default('') }}" - - -- name: Install CRI-O System Container - oc_atomic_container: - name: "cri-o" - image: "{{ l_crio_image }}" - state: latest - -- name: Remove CRI-O default configuration files - file: - path: "{{ item }}" - state: absent - with_items: - - /etc/cni/net.d/200-loopback.conf - - /etc/cni/net.d/100-crio-bridge.conf - -- name: Create the CRI-O configuration - template: - dest: /etc/crio/crio.conf - src: crio.conf.j2 - backup: yes - -- name: Ensure CNI configuration directory exists - file: - path: /etc/cni/net.d/ - state: directory - -- name: setup firewall for CRI-O - include_tasks: crio_firewall.yml - static: yes - -- name: Configure the CNI network - template: - dest: /etc/cni/net.d/openshift-sdn.conf - src: 80-openshift-sdn.conf.j2 - -- name: Start the CRI-O service - systemd: - name: "cri-o" - enabled: yes - state: started - daemon_reload: yes - register: start_result - -- meta: flush_handlers - -# If we are using crio only, docker.service might not be available for -# 'docker login' -- include_tasks: registry_auth.yml - vars: - openshift_docker_alternative_creds: "{{ l_use_crio_only }}" diff --git a/roles/docker/tasks/systemcontainer_docker.yml 
b/roles/docker/tasks/systemcontainer_docker.yml deleted file mode 100644 index 881d83f50..000000000 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ /dev/null @@ -1,190 +0,0 @@ ---- - -- set_fact: - l_openshift_image_tag: "{{ openshift_image_tag | string }}" - when: openshift_image_tag is defined - -- set_fact: - l_openshift_image_tag: "latest" - when: - - openshift_image_tag is not defined - - openshift_release == "latest" - -- set_fact: - l_openshift_image_tag: "{{ openshift_release | string }}" - when: - - openshift_image_tag is not defined - - openshift_release != "latest" - -# If docker_options are provided we should fail. We should not install docker and ignore -# the users configuration. NOTE: docker_options == inventory:openshift_docker_options -- name: Fail quickly if openshift_docker_options are set - assert: - that: - - docker_options is defined - - docker_options != "" - msg: | - Docker via System Container does not allow for the use of the openshift_docker_options - variable. If you want to use openshift_docker_options you will need to use the - traditional docker package install. Otherwise, comment out openshift_docker_options - in your inventory file. - -- name: Ensure container-selinux is installed - package: - name: container-selinux - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -# Used to pull and install the system container -- name: Ensure atomic is installed - package: - name: atomic - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -# At the time of writing the atomic command requires runc for it's own use. This -# task is here in the even that the atomic package ever removes the dependency. -- name: Ensure runc is installed - package: - name: runc - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -# Make sure Docker is installed so we are able to use the client -- name: Install Docker so we can use the client - package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -# Make sure docker is disabled. Errors are ignored. 
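Both deleted system-container task files (CRI-O above, container-engine here) resolve l_openshift_image_tag with the same precedence: an explicit openshift_image_tag wins, otherwise openshift_release is used, with "latest" passed through. A sketch of that precedence in Python (the function name is illustrative; None stands in for "undefined"):

```
def resolve_image_tag(openshift_image_tag=None, openshift_release='latest'):
    """Mirror the three set_fact tasks: prefer an explicit image tag,
    otherwise fall back to the release, which may itself be 'latest'."""
    if openshift_image_tag is not None:
        return str(openshift_image_tag)
    if openshift_release == 'latest':
        return 'latest'
    return str(openshift_release)


assert resolve_image_tag('v3.7.0') == 'v3.7.0'
assert resolve_image_tag(None, 'latest') == 'latest'
assert resolve_image_tag(None, 'v3.7') == 'v3.7'
```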
-- name: Disable Docker - systemd: - name: docker - enabled: no - state: stopped - daemon_reload: yes - ignore_errors: True - register: r_docker_systemcontainer_docker_stop_result - until: not r_docker_systemcontainer_docker_stop_result | failed - retries: 3 - delay: 30 - -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - -- block: - - - name: Set to default prepend - set_fact: - l_docker_image_prepend: "gscrivano" - l_docker_image_tag: "latest" - - - name: Set container engine image tag - set_fact: - l_docker_image_tag: "{{ l_openshift_image_tag }}" - when: - - openshift_deployment_type == 'openshift-enterprise' - - - name: Use Red Hat Registry for image when distribution is Red Hat - set_fact: - l_docker_image_prepend: "registry.access.redhat.com/openshift3" - when: ansible_distribution == 'RedHat' - - - name: Use Fedora Registry for image when distribution is Fedora - set_fact: - l_docker_image_prepend: "registry.fedoraproject.org/f25" - when: ansible_distribution == 'Fedora' - - - name: Set the full image name - set_fact: - l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:{{ l_docker_image_tag }}" - - # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959 - - name: Use a specific image if requested - set_fact: - l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}" - when: - - openshift_docker_systemcontainer_image_override is defined - - openshift_docker_systemcontainer_image_override != "" - - # Be nice and let the user see the variable result - - debug: - var: l_docker_image - -# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released -- name: Pre-pull Container Engine System Container image - command: "atomic pull --storage ostree {{ l_docker_image }}" - changed_when: false - environment: - NO_PROXY: "{{ openshift.common.no_proxy | default('') }}" - - -- name: Ensure container-engine.service.d directory exists - file: - path: "{{ container_engine_systemd_dir }}" - state: directory - -- name: Ensure /etc/docker directory exists - file: - path: "{{ docker_conf_dir }}" - state: directory - -- name: Install Container Engine System Container - oc_atomic_container: - name: "{{ openshift.docker.service_name }}" - image: "{{ l_docker_image }}" - state: latest - -- name: Configure Container Engine Service File - template: - dest: "{{ container_engine_systemd_dir }}/custom.conf" - src: systemcontainercustom.conf.j2 - -# Set local versions of facts that must be in json format for container-daemon.json -# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson -- set_fact: - l_docker_insecure_registries: "{{ l2_docker_insecure_registries | default([]) | to_json }}" - l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}" - l_docker_additional_registries: "{{ l2_docker_additional_registries | default([]) | to_json }}" - l_docker_blocked_registries: "{{ l2_docker_blocked_registries | default([]) | to_json }}" - l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}" - -# Configure container-engine using the container-daemon.json file -# NOTE: daemon.json and container-daemon.json have been seperated to avoid -# collision. 
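The set_fact above pre-serializes the registry lists and log options with to_json because the container-daemon.json template predates jinja2.9's tojson filter. A minimal Python sketch of that serialization step, with illustrative stand-in values rather than the role's real inputs:

```
import json

# Illustrative stand-ins for the l2_docker_* and docker_log_options facts.
l2_docker_insecure_registries = ['172.30.0.0/16']
l2_docker_additional_registries = ['registry.access.redhat.com']
docker_log_options = {'max-size': '50m'}

# Equivalent of "... | default([]) | to_json" for each fact.
l_docker_insecure_registries = json.dumps(l2_docker_insecure_registries)
l_docker_additional_registries = json.dumps(l2_docker_additional_registries)
l_docker_log_options = json.dumps(docker_log_options)

print(l_docker_insecure_registries)  # ["172.30.0.0/16"]
print(l_docker_log_options)          # {"max-size": "50m"}
```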
-- name: Configure Container Engine - template: - dest: "{{ docker_conf_dir }}/container-daemon.json" - src: daemon.json - -# Enable and start the container-engine service -- name: Start the Container Engine service - systemd: - name: "{{ openshift.docker.service_name }}" - enabled: yes - state: started - daemon_reload: yes - register: r_docker_systemcontainer_docker_start_result - until: not r_docker_systemcontainer_docker_start_result | failed - retries: 3 - delay: 30 - -- set_fact: - docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}" - -- meta: flush_handlers - -# Since docker is running as a system container, docker login will fail to create -# credentials. Use alternate method if requiring authenticated registries. -- include_tasks: registry_auth.yml - vars: - openshift_docker_alternative_creds: True diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index a069e4d87..3038ed9f6 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -97,3 +97,5 @@ r_etcd_os_firewall_allow: # set the backend quota to 4GB by default etcd_quota_backend_bytes: 4294967296 + +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml index 82ac4fc84..ca8b6a707 100644 --- a/roles/etcd/tasks/system_container.yml +++ b/roles/etcd/tasks/system_container.yml @@ -1,9 +1,4 @@ --- -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - - name: Pull etcd system container command: atomic pull --storage=ostree {{ etcd_image }} register: pull_result diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index 99ae37319..4c25a9955 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -1,8 +1,8 @@ [Unit] Description=The Etcd Server container -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service [Service] EnvironmentFile={{ etcd_conf_file }} @@ -14,4 +14,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml index 988731ef2..488b6b0bc 100644 --- a/roles/flannel/defaults/main.yaml +++ b/roles/flannel/defaults/main.yaml @@ -5,3 +5,5 @@ etcd_hosts: "{{ etcd_urls }}" etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt" etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt" etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key" + +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 889069485..80e4d391d 100644 --- a/roles/flannel/handlers/main.yml 
+++ b/roles/flannel/handlers/main.yml @@ -6,7 +6,7 @@ - name: restart docker become: yes systemd: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: restarted register: l_docker_restart_docker_in_flannel_result until: not l_docker_restart_docker_in_flannel_result | failed diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index 3cb1fa8d0..83ca83350 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -86,7 +86,7 @@ class CallbackModule(CallbackBase): }, 'installer_phase_logging': { 'title': 'Logging Install', - 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml' + 'playbook': 'playbooks/openshift-logging/config.yml' }, 'installer_phase_prometheus': { 'title': 'Prometheus Install', diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2 index 6bf6c1db2..96c215f00 100644 --- a/roles/kuryr/templates/configmap.yaml.j2 +++ b/roles/kuryr/templates/configmap.yaml.j2 @@ -229,6 +229,7 @@ data: # TODO (apuimedo): Remove the duplicated line just after this one once the # RDO packaging contains the upstream patch worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }} + external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }} [pod_vif_nested] worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }} diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index 410b739e9..cb83c8ead 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -3,8 +3,7 @@ systemd: name={{ openshift.common.service_type }}-master-api state=restarted when: > (openshift_master_ha | bool) and - (not master_api_service_status_changed | default(false)) and - openshift.master.cluster_method == 'native' + (not master_api_service_status_changed | default(false)) # TODO: need to fix up ignore_errors here # We retry the controllers because the API may not be 100% initialized yet. @@ -16,6 +15,5 @@ until: result.rc == 0 when: > (openshift_master_ha | bool) and - (not master_controllers_service_status_changed | default(false)) and - openshift.master.cluster_method == 'native' + (not master_controllers_service_status_changed | default(false)) ignore_errors: yes diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md deleted file mode 100644 index 8c10c9991..000000000 --- a/roles/openshift_atomic/README.md +++ /dev/null @@ -1,28 +0,0 @@ -OpenShift Atomic -================ - -This role houses atomic specific tasks. 
- -Requirements ------------- - -Role Variables --------------- - -Dependencies ------------- - -Example Playbook ----------------- - -``` -- name: Ensure atomic proxies are defined - hosts: localhost - roles: - - role: openshift_atomic -``` - -License -------- - -Apache License Version 2.0 diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml deleted file mode 100644 index ea129f514..000000000 --- a/roles/openshift_atomic/meta/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -galaxy_info: - author: OpenShift - description: Atomic related tasks - company: Red Hat, Inc - license: ASL 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 -dependencies: -- role: lib_openshift diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml index 7e8e9b679..2c1e88cfb 100644 --- a/roles/openshift_aws/tasks/build_node_group.yml +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -28,10 +28,10 @@ l_epoch_time: "{{ ansible_date_time.epoch }}" - when: openshift_aws_create_iam_role - include: iam_role.yml + include_tasks: iam_role.yml - when: openshift_aws_create_launch_config - include: launch_config.yml + include_tasks: launch_config.yml - when: openshift_aws_create_scale_group - include: scale_group.yml + include_tasks: scale_group.yml diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index 0dbeba5a0..fed80b7eb 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -9,7 +9,7 @@ when: - openshift_deployment_type is undefined -- include: launch_config_create.yml +- include_tasks: launch_config_create.yml with_dict: "{{ l_nodes_to_build }}" loop_control: loop_var: launch_config_item diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml index 91538ed5c..06f649343 100644 --- a/roles/openshift_aws/tasks/provision.yml +++ b/roles/openshift_aws/tasks/provision.yml @@ -1,16 +1,16 @@ --- - when: openshift_aws_create_iam_cert | bool name: create the iam_cert for elb certificate - include: iam_cert.yml + include_tasks: iam_cert.yml - when: openshift_aws_create_s3 | bool name: create s3 bucket for registry - include: s3.yml + include_tasks: s3.yml -- include: vpc_and_subnet_id.yml +- include_tasks: vpc_and_subnet_id.yml - name: create elbs - include: elb.yml + include_tasks: elb.yml with_dict: "{{ openshift_aws_elb_dict }}" vars: l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}" @@ -19,7 +19,7 @@ loop_var: l_elb_dict_item - name: include scale group creation for master - include: build_node_group.yml + include_tasks: build_node_group.yml vars: l_nodes_to_build: "{{ openshift_aws_master_group_config }}" l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}" diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml index 3349acb7a..8cc75cd0c 100644 --- a/roles/openshift_aws/tasks/provision_instance.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ -3,7 +3,7 @@ set_fact: openshift_node_bootstrap: True -- include: vpc_and_subnet_id.yml +- include_tasks: vpc_and_subnet_id.yml - name: create instance for ami creation ec2: diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml index 3e84666a2..041ed0791 100644 --- a/roles/openshift_aws/tasks/provision_nodes.yml +++ 
b/roles/openshift_aws/tasks/provision_nodes.yml @@ -27,17 +27,17 @@ set_fact: openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}" -- include: vpc_and_subnet_id.yml +- include_tasks: vpc_and_subnet_id.yml - name: include build compute and infra node groups - include: build_node_group.yml + include_tasks: build_node_group.yml vars: l_nodes_to_build: "{{ openshift_aws_node_group_config }}" l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}" l_aws_ami_map: "{{ openshift_aws_ami_map }}" - name: include build node group for extra nodes - include: build_node_group.yml + include_tasks: build_node_group.yml when: openshift_aws_node_group_config_extra is defined vars: l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}" @@ -47,4 +47,4 @@ # instances aren't scaling fast enough here, we need to wait for them - when: openshift_aws_wait_for_ssh | bool name: wait for our new nodes to come up - include: wait_for_groups.yml + include_tasks: wait_for_groups.yml diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml index 0cb749dcc..7a3d0fb68 100644 --- a/roles/openshift_aws/tasks/seal_ami.yml +++ b/roles/openshift_aws/tasks/seal_ami.yml @@ -31,7 +31,7 @@ source-ami: "{{ amioutput.image_id }}" - name: copy the ami for encrypted disks - include: ami_copy.yml + include_tasks: ami_copy.yml vars: openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted" openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}" diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml index 5cc7ae537..43834079e 100644 --- a/roles/openshift_aws/tasks/security_group.yml +++ b/roles/openshift_aws/tasks/security_group.yml @@ -6,11 +6,11 @@ "tag:Name": "{{ openshift_aws_clusterid }}" register: vpcout -- include: security_group_create.yml +- include_tasks: security_group_create.yml vars: l_security_groups: "{{ openshift_aws_node_security_groups }}" -- include: security_group_create.yml +- include_tasks: security_group_create.yml when: openshift_aws_node_security_groups_extra is defined vars: l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}" diff --git a/roles/openshift_aws/tasks/upgrade_node_group.yml b/roles/openshift_aws/tasks/upgrade_node_group.yml index d7851d887..c3f86f523 100644 --- a/roles/openshift_aws/tasks/upgrade_node_group.yml +++ b/roles/openshift_aws/tasks/upgrade_node_group.yml @@ -4,13 +4,13 @@ when: - openshift_aws_current_version == openshift_aws_new_version -- include: provision_nodes.yml +- include_tasks: provision_nodes.yml -- include: accept_nodes.yml +- include_tasks: accept_nodes.yml -- include: setup_scale_group_facts.yml +- include_tasks: setup_scale_group_facts.yml -- include: setup_master_group.yml +- include_tasks: setup_master_group.yml vars: # we do not set etcd here as its limited to 1 or 3 openshift_aws_masters_groups: masters,nodes diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml index 82da0639e..631a0455e 100644 --- a/roles/openshift_cli/defaults/main.yml +++ b/roles/openshift_cli/defaults/main.yml @@ -4,3 +4,8 @@ system_images_registry_dict: origin: "docker.io" system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}" + +openshift_use_crio_only: False + +l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(False)) | bool }}" 
+l_use_cli_atomic_image: "{{ openshift_use_crio_only or l_is_system_container_image }}" diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml index 29ed82783..5d2b6abed 100644 --- a/roles/openshift_cli/meta/main.yml +++ b/roles/openshift_cli/meta/main.yml @@ -12,6 +12,4 @@ galaxy_info: categories: - cloud dependencies: -- role: openshift_docker - when: not skip_docker_role | default(False) | bool - role: openshift_facts diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 7b046b2c4..140c6ea26 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -1,10 +1,4 @@ --- -- set_fact: - l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}" - l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool }}" -- set_fact: - l_use_cli_atomic_image: "{{ l_use_crio_only or l_is_system_container_image }}" - - name: Install clients package: name={{ openshift.common.service_type }}-clients state=present when: not openshift.common.is_containerized | bool diff --git a/roles/openshift_cluster_autoscaler/tasks/main.yml b/roles/openshift_cluster_autoscaler/tasks/main.yml index 173dcf044..ca7dfb885 100644 --- a/roles/openshift_cluster_autoscaler/tasks/main.yml +++ b/roles/openshift_cluster_autoscaler/tasks/main.yml @@ -31,7 +31,7 @@ type: role name: "{{ openshift_cluster_autoscaler_name }}" -- include: aws.yml +- include_tasks: aws.yml when: openshift_cluster_autoscaler_cloud_provider == 'aws' - name: create the policies diff --git a/roles/openshift_docker/defaults/main.yml b/roles/openshift_docker/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_docker/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_docker/meta/main.yml b/roles/openshift_docker/meta/main.yml deleted file mode 100644 index 60efd4e45..000000000 --- a/roles/openshift_docker/meta/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: OpenShift Docker - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: openshift_docker_facts -- role: docker diff --git a/roles/openshift_docker/tasks/main.yml b/roles/openshift_docker/tasks/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_docker/tasks/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_docker_facts/defaults/main.yml b/roles/openshift_docker_facts/defaults/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_docker_facts/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_docker_facts/meta/main.yml b/roles/openshift_docker_facts/meta/main.yml deleted file mode 100644 index 5b1be7a8d..000000000 --- a/roles/openshift_docker_facts/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: OpenShift Docker Facts - company: Red Hat, Inc. 
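The openshift_cli defaults added above replace the per-play set_fact tasks removed from roles/openshift_cli/tasks/main.yml. A rough Python sketch of how the nested default() chain resolves (parameter names mirror the inventory variables; None stands in for "undefined"):

```
def use_cli_atomic_image(openshift_use_crio_only=False,
                         openshift_use_master_system_container=None,
                         openshift_use_system_containers=None):
    """Mirror l_is_system_container_image / l_use_cli_atomic_image from
    roles/openshift_cli/defaults/main.yml."""
    if openshift_use_master_system_container is not None:
        is_system_container_image = bool(openshift_use_master_system_container)
    elif openshift_use_system_containers is not None:
        is_system_container_image = bool(openshift_use_system_containers)
    else:
        is_system_container_image = False
    return openshift_use_crio_only or is_system_container_image


assert use_cli_atomic_image() is False
assert use_cli_atomic_image(openshift_use_crio_only=True) is True
assert use_cli_atomic_image(openshift_use_system_containers=True) is True
```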
- license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- { role: openshift_facts } diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml deleted file mode 100644 index 5a3e50678..000000000 --- a/roles/openshift_docker_facts/tasks/main.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- name: Set docker facts - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: docker - local_facts: - selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}" - log_driver: "{{ openshift_docker_log_driver | default(None) }}" - log_options: "{{ openshift_docker_log_options | default(None) }}" - options: "{{ openshift_docker_options | default(None) }}" - disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}" - hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}" - hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}" - use_system_container: "{{ openshift_docker_use_system_container | default(False) }}" - use_crio: "{{ openshift_use_crio | default(False) }}" - - role: node - local_facts: - sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - -- set_fact: - docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}" - docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}" - docker_log_options: "{{ openshift.docker.log_options | default(omit) }}" - docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub - | default(omit) }}" - docker_http_proxy: "{{ openshift.common.http_proxy | default(omit) }}" - docker_https_proxy: "{{ openshift.common.https_proxy | default(omit) }}" - docker_no_proxy: "{{ openshift.common.no_proxy | default(omit) }}" - -- set_fact: - docker_options: "--insecure-registry={{ openshift.docker.hosted_registry_network }} {{ openshift.docker.options | default ('') }}" - when: openshift.docker.hosted_registry_insecure | default(False) | bool and openshift.docker.hosted_registry_network is defined - register: hosted_registry_options - -- set_fact: - docker_options: "{{ openshift.docker.options | default(omit) }}" - when: hosted_registry_options | skipped diff --git a/roles/openshift_docker_facts/vars/main.yml b/roles/openshift_docker_facts/vars/main.yml deleted file mode 100644 index 55c04b0c1..000000000 --- a/roles/openshift_docker_facts/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml index 7cc548f69..0e28fec03 100644 --- a/roles/openshift_etcd/meta/main.yml +++ b/roles/openshift_etcd/meta/main.yml @@ -13,6 +13,4 @@ galaxy_info: - cloud dependencies: - role: openshift_etcd_facts -- role: openshift_docker - when: openshift.common.is_containerized | bool - role: etcd diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index 7064d727a..a182d23c5 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -3,4 +3,98 @@ openshift_cli_image_dict: origin: 'openshift/origin' openshift-enterprise: 'openshift3/ose' +openshift_hosted_images_dict: + origin: 'openshift/origin-${component}:${version}' + openshift-enterprise: 
'openshift3/ose-${component}:${version}' + openshift_cli_image: "{{ osm_image | default(openshift_cli_image_dict[openshift_deployment_type]) }}" + +# osm_default_subdomain is an old migrated fact, can probably be removed. +osm_default_subdomain: "router.default.svc.cluster.local" +openshift_master_default_subdomain: "{{ osm_default_subdomain }}" + +openshift_hosted_etcd_storage_nfs_directory: '/exports' +openshift_hosted_etcd_storage_nfs_options: '*(rw,root_squash)' +openshift_hosted_etcd_storage_volume_name: 'etcd' +openshift_hosted_etcd_storage_volume_size: '1Gi' +openshift_hosted_etcd_storage_create_pv: True +openshift_hosted_etcd_storage_create_pvc: False +openshift_hosted_etcd_storage_access_modes: + - 'ReadWriteOnce' + +openshift_hosted_registry_namespace: 'default' +openshift_hosted_registry_storage_volume_name: 'registry' +openshift_hosted_registry_storage_volume_size: '5Gi' +openshift_hosted_registry_storage_create_pv: True +openshift_hosted_registry_storage_create_pvc: True +openshift_hosted_registry_storage_nfs_directory: '/exports' +openshift_hosted_registry_storage_nfs_options: '*(rw,root_squash)' +openshift_hosted_registry_storage_glusterfs_endpoints: 'glusterfs-registry-endpoints' +openshift_hosted_registry_storage_glusterfs_path: glusterfs-registry-volume +openshift_hosted_registry_storage_glusterfs_readOnly: False +openshift_hosted_registry_storage_glusterfs_swap: False +openshift_hosted_registry_storage_glusterfs_swapcopy: True +openshift_hosted_registry_storage_glusterfs_ips: [] +openshift_hosted_registry_storage_access_modes: + - 'ReadWriteMany' + +openshift_logging_storage_nfs_directory: '/exports' +openshift_logging_storage_nfs_options: '*(rw,root_squash)' +openshift_logging_storage_volume_name: 'logging-es' +openshift_logging_storage_create_pv: True +openshift_logging_storage_create_pvc: False +openshift_logging_storage_access_modes: + - ['ReadWriteOnce'] + +openshift_loggingops_storage_volume_name: 'logging-es-ops' +openshift_loggingops_storage_volume_size: '10Gi' +openshift_loggingops_storage_create_pv: True +openshift_loggingops_storage_create_pvc: False +openshift_loggingops_storage_nfs_directory: '/exports' +openshift_loggingops_storage_nfs_options: '*(rw,root_squash)' +openshift_loggingops_storage_access_modes: + - 'ReadWriteOnce' + +openshift_metrics_deploy: False +openshift_metrics_duration: 7 +openshift_metrics_resolution: '10s' +openshift_metrics_storage_volume_name: 'metrics' +openshift_metrics_storage_volume_size: '10Gi' +openshift_metrics_storage_create_pv: True +openshift_metrics_storage_create_pvc: False +openshift_metrics_storage_nfs_directory: '/exports' +openshift_metrics_storage_nfs_options: '*(rw,root_squash)' +openshift_metrics_storage_access_modes: + - 'ReadWriteOnce' + +openshift_prometheus_storage_volume_name: 'prometheus' +openshift_prometheus_storage_volume_size: '10Gi' +openshift_prometheus_storage_nfs_directory: '/exports' +openshift_prometheus_storage_nfs_options: '*(rw,root_squash)' +openshift_prometheus_storage_access_modes: + - 'ReadWriteOnce' +openshift_prometheus_storage_create_pv: True +openshift_prometheus_storage_create_pvc: False + +openshift_prometheus_alertmanager_storage_volume_name: 'prometheus-alertmanager' +openshift_prometheus_alertmanager_storage_volume_size: '10Gi' +openshift_prometheus_alertmanager_storage_nfs_directory: '/exports' +openshift_prometheus_alertmanager_storage_nfs_options: '*(rw,root_squash)' +openshift_prometheus_alertmanager_storage_access_modes: + - 'ReadWriteOnce' 
+openshift_prometheus_alertmanager_storage_create_pv: True +openshift_prometheus_alertmanager_storage_create_pvc: False + +openshift_prometheus_alertbuffer_storage_volume_name: 'prometheus-alertbuffer' +openshift_prometheus_alertbuffer_storage_volume_size: '10Gi' +openshift_prometheus_alertbuffer_storage_nfs_directory: '/exports' +openshift_prometheus_alertbuffer_storage_nfs_options: '*(rw,root_squash)' +openshift_prometheus_alertbuffer_storage_access_modes: + - 'ReadWriteOnce' +openshift_prometheus_alertbuffer_storage_create_pv: True +openshift_prometheus_alertbuffer_storage_create_pvc: False + + +openshift_router_selector: "region=infra" +openshift_hosted_router_selector: "{{ openshift_router_selector }}" +openshift_hosted_registry_selector: "{{ openshift_router_selector }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 9424f3cde..f57b59085 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -11,14 +11,13 @@ import copy import errno import json import re -import io import os import yaml import struct import socket from distutils.util import strtobool from distutils.version import LooseVersion -from ansible.module_utils.six import string_types, text_type +from ansible.module_utils.six import string_types from ansible.module_utils.six.moves import configparser # ignore pylint errors related to the module_utils import @@ -51,39 +50,6 @@ EXAMPLES = ''' ''' -def migrate_docker_facts(facts): - """ Apply migrations for docker facts """ - params = { - 'common': ( - 'options' - ), - 'node': ( - 'log_driver', - 'log_options' - ) - } - if 'docker' not in facts: - facts['docker'] = {} - # pylint: disable=consider-iterating-dictionary - for role in params.keys(): - if role in facts: - for param in params[role]: - old_param = 'docker_' + param - if old_param in facts[role]: - facts['docker'][param] = facts[role].pop(old_param) - - if 'node' in facts and 'portal_net' in facts['node']: - facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net') - - # log_options was originally meant to be a comma separated string, but - # we now prefer an actual list, with backward compatibility: - if 'log_options' in facts['docker'] and \ - isinstance(facts['docker']['log_options'], string_types): - facts['docker']['log_options'] = facts['docker']['log_options'].split(",") - - return facts - - # TODO: We should add a generic migration function that takes source and destination # paths and does the right thing rather than one function for common, one for node, etc. 
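The flat openshift_hosted_*, openshift_logging_* and openshift_prometheus_* defaults added above take over from the nested openshift.hosted (and related) facts that the rest of this commit removes. The rename is a mechanical flattening of the fact path, as in this small sketch (the helper is illustrative; the example pairs are taken from hunks elsewhere in this diff):

```
# Old nested fact path -> new flat inventory variable, for pairs that appear
# in this diff.
FACT_TO_VARIABLE = {
    'openshift.hosted.registry.storage.kind': 'openshift_hosted_registry_storage_kind',
    'openshift.hosted.registry.storage.volume.name': 'openshift_hosted_registry_storage_volume_name',
    'openshift.hosted.registry.storage.glusterfs.path': 'openshift_hosted_registry_storage_glusterfs_path',
    'openshift.hosted.registry.storage.glusterfs.swapcopy': 'openshift_hosted_registry_storage_glusterfs_swapcopy',
}


def flat_name(fact_path):
    """Join the nested fact path with underscores to get the flat variable name."""
    head, rest = fact_path.split('.', 1)
    assert head == 'openshift'
    return 'openshift_' + rest.replace('.', '_')


for old, new in FACT_TO_VARIABLE.items():
    assert flat_name(old) == new
```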
def migrate_common_facts(facts): @@ -119,24 +85,6 @@ def migrate_node_facts(facts): return facts -def migrate_hosted_facts(facts): - """ Apply migrations for master facts """ - if 'master' in facts: - if 'router_selector' in facts['master']: - if 'hosted' not in facts: - facts['hosted'] = {} - if 'router' not in facts['hosted']: - facts['hosted']['router'] = {} - facts['hosted']['router']['selector'] = facts['master'].pop('router_selector') - if 'registry_selector' in facts['master']: - if 'hosted' not in facts: - facts['hosted'] = {} - if 'registry' not in facts['hosted']: - facts['hosted']['registry'] = {} - facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector') - return facts - - def migrate_admission_plugin_facts(facts): """ Apply migrations for admission plugin facts """ if 'master' in facts: @@ -155,10 +103,8 @@ def migrate_admission_plugin_facts(facts): def migrate_local_facts(facts): """ Apply migrations of local facts """ migrated_facts = copy.deepcopy(facts) - migrated_facts = migrate_docker_facts(migrated_facts) migrated_facts = migrate_common_facts(migrated_facts) migrated_facts = migrate_node_facts(migrated_facts) - migrated_facts = migrate_hosted_facts(migrated_facts) migrated_facts = migrate_admission_plugin_facts(migrated_facts) return migrated_facts @@ -445,58 +391,6 @@ def normalize_provider_facts(provider, metadata): return facts -# pylint: disable=too-many-branches -def set_selectors(facts): - """ Set selectors facts if not already present in facts dict - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the generated selectors - facts if they were not already present - - """ - selector = "region=infra" - - if 'hosted' not in facts: - facts['hosted'] = {} - if 'router' not in facts['hosted']: - facts['hosted']['router'] = {} - if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']: - facts['hosted']['router']['selector'] = selector - if 'registry' not in facts['hosted']: - facts['hosted']['registry'] = {} - if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']: - facts['hosted']['registry']['selector'] = selector - if 'metrics' not in facts['hosted']: - facts['hosted']['metrics'] = {} - if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']: - facts['hosted']['metrics']['selector'] = None - if 'logging' not in facts or not isinstance(facts['logging'], dict): - facts['logging'] = {} - if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']: - facts['logging']['selector'] = None - if 'etcd' not in facts['hosted']: - facts['hosted']['etcd'] = {} - if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']: - facts['hosted']['etcd']['selector'] = None - if 'prometheus' not in facts: - facts['prometheus'] = {} - if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']: - facts['prometheus']['selector'] = None - if 'alertmanager' not in facts['prometheus']: - facts['prometheus']['alertmanager'] = {} - # pylint: disable=line-too-long - if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']: - facts['prometheus']['alertmanager']['selector'] = None - if 'alertbuffer' not in facts['prometheus']: - facts['prometheus']['alertbuffer'] = {} - # pylint: disable=line-too-long - if 
'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']: - facts['prometheus']['alertbuffer']['selector'] = None - - return facts - - def set_identity_providers_if_unset(facts): """ Set identity_providers fact if not already present in facts dict @@ -641,60 +535,6 @@ def set_aggregate_facts(facts): return facts -def set_etcd_facts_if_unset(facts): - """ - If using embedded etcd, loads the data directory from master-config.yaml. - - If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf. - - If anything goes wrong parsing these, the fact will not be set. - """ - if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']): - etcd_facts = facts['etcd'] if 'etcd' in facts else dict() - - if 'etcd_data_dir' not in etcd_facts: - try: - # Parse master config to find actual etcd data dir: - master_cfg_path = os.path.join(facts['common']['config_base'], - 'master/master-config.yaml') - master_cfg_f = open(master_cfg_path, 'r') - config = yaml.safe_load(master_cfg_f.read()) - master_cfg_f.close() - - etcd_facts['etcd_data_dir'] = \ - config['etcdConfig']['storageDirectory'] - - facts['etcd'] = etcd_facts - - # We don't want exceptions bubbling up here: - # pylint: disable=broad-except - except Exception: - pass - else: - etcd_facts = facts['etcd'] if 'etcd' in facts else dict() - - # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf: - try: - # Add a fake section for parsing: - ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8') - ini_fp = io.StringIO(ini_str) - config = configparser.RawConfigParser() - config.readfp(ini_fp) - etcd_data_dir = config.get('root', 'ETCD_DATA_DIR') - if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'): - etcd_data_dir = etcd_data_dir[1:-1] - - etcd_facts['etcd_data_dir'] = etcd_data_dir - facts['etcd'] = etcd_facts - - # We don't want exceptions bubbling up here: - # pylint: disable=broad-except - except Exception: - pass - - return facts - - def set_deployment_facts_if_unset(facts): """ Set Facts that vary based on deployment_type. This currently includes common.service_type, master.registry_url, node.registry_url, @@ -1104,6 +944,7 @@ def get_version_output(binary, version_cmd): return output +# We may need this in the future. def get_docker_version_info(): """ Parses and returns the docker version info """ result = None @@ -1117,25 +958,6 @@ def get_docker_version_info(): return result -def get_hosted_registry_insecure(): - """ Parses OPTIONS from /etc/sysconfig/docker to determine if the - registry is currently insecure. - """ - hosted_registry_insecure = None - if os.path.exists('/etc/sysconfig/docker'): - try: - ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8') - ini_fp = io.StringIO(ini_str) - config = configparser.RawConfigParser() - config.readfp(ini_fp) - options = config.get('root', 'OPTIONS') - if 'insecure-registry' in options: - hosted_registry_insecure = True - except Exception: # pylint: disable=broad-except - pass - return hosted_registry_insecure - - def get_openshift_version(facts): """ Get current version of openshift on the host. @@ -1565,13 +1387,6 @@ def set_container_facts_if_unset(facts): deployer_image = 'openshift/origin-deployer' facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') - # If openshift_docker_use_system_container is set and is True .... 
- if 'use_system_container' in list(facts['docker'].keys()): - # use safe_get_bool as the inventory variable may not be a - # valid boolean on it's own. - if safe_get_bool(facts['docker']['use_system_container']): - # ... set the service name to container-engine - facts['docker']['service_name'] = 'container-engine' if 'is_containerized' not in facts['common']: facts['common']['is_containerized'] = facts['common']['is_atomic'] @@ -1664,15 +1479,9 @@ class OpenShiftFacts(object): 'buildoverrides', 'cloudprovider', 'common', - 'docker', 'etcd', - 'hosted', 'master', - 'node', - 'logging', - 'loggingops', - 'metrics', - 'prometheus'] + 'node'] # Disabling too-many-arguments, this should be cleaned up as a TODO item. # pylint: disable=too-many-arguments,no-value-for-parameter @@ -1745,7 +1554,6 @@ class OpenShiftFacts(object): facts = migrate_oauth_template_facts(facts) facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) - facts = set_selectors(facts) facts = set_identity_providers_if_unset(facts) facts = set_deployment_facts_if_unset(facts) facts = set_sdn_facts_if_unset(facts, self.system_facts) @@ -1755,7 +1563,6 @@ class OpenShiftFacts(object): facts = build_api_server_args(facts) facts = set_version_facts_if_unset(facts) facts = set_aggregate_facts(facts) - facts = set_etcd_facts_if_unset(facts) facts = set_proxy_facts(facts) facts = set_builddefaults_facts(facts) facts = set_buildoverrides_facts(facts) @@ -1818,200 +1625,9 @@ class OpenShiftFacts(object): local_quota_per_fsgroup="", set_node_ip=False) - if 'docker' in roles: - docker = dict(disable_push_dockerhub=False, - options='--log-driver=journald') - # NOTE: This is a workaround for a dnf output racecondition that can occur in - # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184 - if self.system_facts['ansible_pkg_mgr'] == 'dnf': - rpm_rebuilddb() - - version_info = get_docker_version_info() - if version_info is not None: - docker['api_version'] = version_info['api_version'] - docker['version'] = version_info['version'] - docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10') - hosted_registry_insecure = get_hosted_registry_insecure() - if hosted_registry_insecure is not None: - docker['hosted_registry_insecure'] = hosted_registry_insecure - docker['service_name'] = 'docker' - defaults['docker'] = docker - if 'cloudprovider' in roles: defaults['cloudprovider'] = dict(kind=None) - if 'hosted' in roles or self.role == 'hosted': - defaults['hosted'] = dict( - etcd=dict( - storage=dict( - kind=None, - volume=dict( - name='etcd', - size='1Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), - registry=dict( - storage=dict( - kind=None, - volume=dict( - name='registry', - size='5Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)'), - glusterfs=dict( - endpoints='glusterfs-registry-endpoints', - path='glusterfs-registry-volume', - ips=[], - readOnly=False, - swap=False, - swapcopy=True), - host=None, - access=dict( - modes=['ReadWriteMany'] - ), - create_pv=True, - create_pvc=True - ) - ), - router=dict() - ) - - defaults['logging'] = dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - 
defaults['loggingops'] = dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es-ops', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['metrics'] = dict( - deploy=False, - duration=7, - resolution='10s', - storage=dict( - kind=None, - volume=dict( - name='metrics', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['prometheus'] = dict( - storage=dict( - kind=None, - volume=dict( - name='prometheus', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['prometheus']['alertmanager'] = dict( - storage=dict( - kind=None, - volume=dict( - name='prometheus-alertmanager', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['prometheus']['alertbuffer'] = dict( - storage=dict( - kind=None, - volume=dict( - name='prometheus-alertbuffer', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - return defaults def guess_host_provider(self): @@ -2189,12 +1805,6 @@ class OpenShiftFacts(object): facts_to_set, additive_facts_to_overwrite) - if 'docker' in new_local_facts: - # Convert legacy log_options comma sep string to a list if present: - if 'log_options' in new_local_facts['docker'] and \ - isinstance(new_local_facts['docker']['log_options'], string_types): - new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',') - new_local_facts = self.remove_empty_facts(new_local_facts) if new_local_facts != local_facts: diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index e70c0c420..b6501d288 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -27,6 +27,9 @@ openshift_cluster_domain: 'cluster.local' r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" +openshift_hosted_router_namespace: 'default' + openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" openshift_hosted_router_edits: @@ -40,13 +43,14 @@ openshift_hosted_router_edits: value: 21600 action: put +openshift_hosted_router_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" openshift_hosted_routers: - name: router replicas: "{{ replicas | default(1) }}" namespace: default serviceaccount: router selector: "{{ openshift_hosted_router_selector | default(None) }}" - images: "{{ openshift_hosted_router_image | default(None) }}" + images: "{{ openshift_hosted_router_registryurl }}" edits: "{{ openshift_hosted_router_edits }}" stats_port: 1936 ports: @@ -64,6 +68,11 @@ r_openshift_hosted_router_os_firewall_allow: [] # Registry # ############ 
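openshift_hosted_router_registryurl above (and its registry counterpart below) pull an image template such as 'openshift/origin-${component}:${version}' from openshift_hosted_images_dict. The ${component}/${version} placeholders are expanded downstream by the oc adm router / oc adm registry invocations that consume the template, not by this role; a rough Python illustration of that expansion, with illustrative component and version values:

```
from string import Template

# 'origin' entry from openshift_hosted_images_dict in roles/openshift_facts/defaults/main.yml.
image_template = 'openshift/origin-${component}:${version}'

# Illustrative substitution; the real values are chosen by oc adm at deploy time.
image = Template(image_template).substitute(component='haproxy-router', version='v3.7.0')
print(image)  # openshift/origin-haproxy-router:v3.7.0
```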
+openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" +penshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" +openshift_hosted_registry_routecertificates: {} +openshift_hosted_registry_routetermination: "passthrough" + r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py index 7f41529ac..003ce5f9e 100644 --- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py +++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py @@ -12,7 +12,7 @@ class FilterModule(object): def get_router_replicas(replicas=None, router_nodes=None): ''' This function will return the number of replicas based on the results from the defined - openshift.hosted.router.replicas OR + openshift_hosted_router_replicas OR the query from oc_obj on openshift nodes with a selector OR default to 1 diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml index 9f2ef4e40..de302c740 100644 --- a/roles/openshift_hosted/tasks/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -6,20 +6,20 @@ check_mode: no - name: setup firewall - include: firewall.yml + import_tasks: firewall.yml vars: l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_registry_firewall_enabled }}" l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_registry_use_firewalld }}" l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}" l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}" -- when: openshift.hosted.registry.replicas | default(none) is none +- when: openshift_hosted_registry_replicas | default(none) is none block: - name: Retrieve list of openshift nodes matching registry selector oc_obj: state: list kind: node - selector: "{{ openshift.hosted.registry.selector | default(omit) }}" + selector: "{{ openshift_hosted_registry_selector }}" register: registry_nodes - name: set_fact l_node_count to number of nodes matching registry selector @@ -39,16 +39,13 @@ # just 1: - name: set_fact l_default_replicas when l_node_count > 0 set_fact: - l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}" + l_default_replicas: "{{ l_node_count if openshift_hosted_registry_storage_kind | default(none) is not none else 1 }}" when: l_node_count | int > 0 - name: set openshift_hosted facts set_fact: - openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}" - openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" - openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}" - openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}" - openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- 
set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift.hosted.registry.storage.glusterfs.ips }}{% endif %}" + # This determines the gluster_ips to use for the registry by looping over the glusterfs_registry group + openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift_hosted_registry_storage_glusterfs_ips }}{% endif %}" - name: Update registry environment variables when pushing via dns set_fact: @@ -97,16 +94,14 @@ service_type: ClusterIP clusterip: '{{ openshift_hosted_registry_clusterip | default(omit) }}' -- include: secure.yml - static: no +- include_tasks: secure.yml run_once: true when: - - not (openshift.docker.hosted_registry_insecure | default(false) | bool) + - not (openshift_docker_hosted_registry_insecure | default(False)) | bool -- include: storage/object_storage.yml - static: no +- include_tasks: storage/object_storage.yml when: - - openshift.hosted.registry.storage.kind | default(none) == 'object' + - openshift_hosted_registry_storage_kind | default(none) == 'object' - name: Update openshift_hosted facts for persistent volumes set_fact: @@ -115,23 +110,23 @@ pvc_volume_mounts: - name: registry-storage type: persistentVolumeClaim - claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim" + claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-claim" when: - - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs'] + - openshift_hosted_registry_storage_kind | default(none) in ['nfs', 'openstack', 'glusterfs'] -- include: storage/glusterfs_endpoints.yml +- include_tasks: storage/glusterfs_endpoints.yml when: - openshift_hosted_registry_storage_glusterfs_ips|length > 0 - - openshift.hosted.registry.storage.kind | default(none) in ['glusterfs'] + - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs'] - name: Create OpenShift registry oc_adm_registry: name: "{{ openshift_hosted_registry_name }}" namespace: "{{ openshift_hosted_registry_namespace }}" selector: "{{ openshift_hosted_registry_selector }}" - replicas: "{{ openshift_hosted_registry_replicas }}" + replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}" service_account: "{{ openshift_hosted_registry_serviceaccount }}" - images: "{{ openshift_hosted_registry_images }}" + images: "{{ penshift_hosted_registry_registryurl }}" env_vars: "{{ openshift_hosted_registry_env_vars }}" volume_mounts: "{{ openshift_hosted_registry_volumes }}" edits: "{{ openshift_hosted_registry_edits }}" @@ -144,14 +139,14 @@ namespace: "{{ openshift_hosted_registry_namespace }}" - name: Wait for pod (Registry) - include: wait_for_pod.yml + include_tasks: wait_for_pod.yml vars: l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}" l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}" -- include: storage/glusterfs.yml +- include_tasks: storage/glusterfs.yml when: - - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or 
openshift.hosted.registry.storage.glusterfs.swap + - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap - name: Delete temp directory file: diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index dd7053656..4e9219477 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -1,6 +1,6 @@ --- - name: setup firewall - include: firewall.yml + import_tasks: firewall.yml vars: l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_router_firewall_enabled }}" l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_router_use_firewalld }}" @@ -11,16 +11,14 @@ oc_obj: state: list kind: node - namespace: "{{ openshift.hosted.router.namespace | default('default') }}" - selector: "{{ openshift.hosted.router.selector | default(omit) }}" + namespace: "{{ openshift_hosted_router_namespace }}" + selector: "{{ openshift_hosted_router_selector }}" register: router_nodes - when: openshift.hosted.router.replicas | default(none) is none + when: openshift_hosted_router_replicas | default(none) is none - name: set_fact replicas set_fact: - replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}" - openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}" - openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}" + replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}" - name: Get the certificate contents for router copy: @@ -42,8 +40,8 @@ signer_key: "{{ openshift_master_config_dir }}/ca.key" signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" hostnames: - - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}" - - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}" + - "{{ openshift_master_default_subdomain }}" + - "*.{{ openshift_master_default_subdomain }}" cert: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" key: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" with_items: "{{ openshift_hosted_routers }}" @@ -102,7 +100,7 @@ with_items: "{{ openshift_hosted_routers }}" - name: Wait for pod (Routers) - include: wait_for_pod.yml + include_tasks: wait_for_pod.yml vars: l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}" l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}" diff --git a/roles/openshift_hosted/tasks/secure.yml b/roles/openshift_hosted/tasks/secure.yml index 174bc39a4..378ae32dc 100644 --- a/roles/openshift_hosted/tasks/secure.yml +++ b/roles/openshift_hosted/tasks/secure.yml @@ -1,18 +1,10 @@ --- -- name: Configure facts for docker-registry - set_fact: - openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}" - openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}" - openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}" - - name: Include reencrypt route configuration - include: secure/reencrypt.yml - static: no + include_tasks: secure/reencrypt.yml when: openshift_hosted_registry_routetermination == 'reencrypt' - name: Include 
passthrough route configuration - include: secure/passthrough.yml - static: no + include_tasks: secure/passthrough.yml when: openshift_hosted_registry_routetermination == 'passthrough' - name: Fetch the docker-registry route @@ -39,7 +31,7 @@ - "{{ docker_registry_route.results[0].spec.host }}" - "{{ openshift_hosted_registry_name }}.default.svc" - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}" - - "{{ openshift_hosted_registry_routehost }}" + - "{{ openshift_hosted_registry_routehost | default(omit) }}" cert: "{{ docker_registry_cert_path }}" key: "{{ docker_registry_key_path }}" expire_days: "{{ openshift_hosted_registry_cert_expire_days }}" diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index 7cae67baa..18b2edcc6 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml @@ -17,7 +17,7 @@ until: - "registry_pods.results.results[0]['items'] | count > 0" # There must be as many matching pods with 'Ready' status True as there are expected replicas - - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int" + - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int" delay: 10 retries: "{{ (600 / 10) | int }}" @@ -35,7 +35,7 @@ mount: state: mounted fstype: glusterfs - src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}" + src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}" name: "{{ mktemp.stdout }}" - name: Set registry volume permissions @@ -60,7 +60,7 @@ - name: Copy current registry contents to new GlusterFS volume command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/" - when: openshift.hosted.registry.storage.glusterfs.swapcopy + when: openshift_hosted_registry_storage_glusterfs_swapcopy - name: Swap new GlusterFS registry volume oc_volume: @@ -68,7 +68,7 @@ name: "{{ openshift_hosted_registry_name }}" vol_name: registry-storage mount_type: pvc - claim_name: "{{ openshift.hosted.registry.storage.volume.name 
}}-glusterfs-claim" + claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim" - name: Deactivate registry maintenance mode oc_env: @@ -77,7 +77,7 @@ state: absent env_vars: - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true' - when: openshift.hosted.registry.storage.glusterfs.swap + when: openshift_hosted_registry_storage_glusterfs_swap - name: Unmount registry volume and clean up mount point/fstab mount: diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml index 0f4381748..bd7181c17 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml @@ -10,7 +10,7 @@ dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - name: Create GlusterFS registry service and endpoint - command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift.hosted.registry.namespace | default('default') }}" + command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}" with_items: - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml" diff --git a/roles/openshift_hosted/tasks/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml index 8553a8098..a8c26fb51 100644 --- a/roles/openshift_hosted/tasks/storage/object_storage.yml +++ b/roles/openshift_hosted/tasks/storage/object_storage.yml @@ -1,6 +1,6 @@ --- -- include: s3.yml - when: openshift.hosted.registry.storage.provider == 's3' +- include_tasks: s3.yml + when: openshift_hosted_registry_storage_provider == 's3' - name: Ensure the registry secret exists oc_secret: diff --git a/roles/openshift_hosted/tasks/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml index 8e905d905..4c100ee4e 100644 --- a/roles/openshift_hosted/tasks/storage/s3.yml +++ b/roles/openshift_hosted/tasks/storage/s3.yml @@ -2,8 +2,8 @@ - name: Assert that S3 variables are provided for registry_config template assert: that: - - openshift.hosted.registry.storage.s3.bucket | default(none) is not none - - openshift.hosted.registry.storage.s3.bucket | default(none) is not none + - openshift_hosted_registry_storage_s3_bucket | default(none) is not none + - openshift_hosted_registry_storage_s3_region | default(none) is not none msg: | When using S3 storage, the following variables are required: openshift_hosted_registry_storage_s3_bucket diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 index 607d25533..3c874d910 100644 --- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Endpoints metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} subsets: - addresses: {% for ip in openshift_hosted_registry_storage_glusterfs_ips %} diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 index 452c7c3e1..f18c94a4f 100644 --- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 +++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 
@@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} spec: ports: - port: 1 diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 index 607d25533..3c874d910 100644 --- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Endpoints metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} subsets: - addresses: {% for ip in openshift_hosted_registry_storage_glusterfs_ips %} diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 index 452c7c3e1..f18c94a4f 100644 --- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 +++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} spec: ports: - port: 1 diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 8fc70cecb..ed97d539c 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -1,19 +1 @@ --- -# openshift_*_selector variables have been deprecated in favor of -# openshift_hosted_*_selector variables. -- set_fact: - openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" - when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined -- set_fact: - openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" - when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined - -- name: Set hosted facts - openshift_facts: - role: "{{ item }}" - openshift_env: "{{ hostvars - | oo_merge_hostvars(vars, inventory_hostname) - | oo_openshift_env }}" - openshift_env_structures: - - 'openshift.hosted.router.*' - with_items: [hosted, logging, loggingops, metrics, prometheus] diff --git a/roles/openshift_hosted_metrics/README.md b/roles/openshift_hosted_metrics/README.md deleted file mode 100644 index c2af3c494..000000000 --- a/roles/openshift_hosted_metrics/README.md +++ /dev/null @@ -1,54 +0,0 @@ -OpenShift Metrics with Hawkular -==================== - -OpenShift Metrics Installation - -Requirements ------------- - -* Ansible 2.2 -* It requires subdomain fqdn to be set. -* If persistence is enabled, then it also requires NFS. - -Role Variables --------------- - -From this role: - -| Name | Default value | | -|-------------------------------------------------|-----------------------|-------------------------------------------------------------| -| openshift_hosted_metrics_deploy | `False` | If metrics should be deployed | -| openshift_hosted_metrics_public_url | null | Hawkular metrics public url | -| openshift_hosted_metrics_storage_nfs_directory | `/exports` | Root export directory. 
| -| openshift_hosted_metrics_storage_volume_name | `metrics` | Metrics volume within openshift_hosted_metrics_volume_dir | -| openshift_hosted_metrics_storage_volume_size | `10Gi` | Metrics volume size | -| openshift_hosted_metrics_storage_nfs_options | `*(rw,root_squash)` | NFS options for configured exports. | -| openshift_hosted_metrics_duration | `7` | Metrics query duration | -| openshift_hosted_metrics_resolution | `10s` | Metrics resolution | - - -Dependencies ------------- -openshift_facts -openshift_examples -openshift_master_facts - -Example Playbook ----------------- - -``` -- name: Configure openshift-metrics - hosts: oo_first_master - roles: - - role: openshift_hosted_metrics -``` - -License -------- - -Apache License, Version 2.0 - -Author Information ------------------- - -Jose David MartÃn (j.david.nieto@gmail.com) diff --git a/roles/openshift_hosted_metrics/defaults/main.yml b/roles/openshift_hosted_metrics/defaults/main.yml deleted file mode 100644 index a01f24df8..000000000 --- a/roles/openshift_hosted_metrics/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml deleted file mode 100644 index 074b72942..000000000 --- a/roles/openshift_hosted_metrics/handlers/main.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - notify: Verify API Server - -# We retry the controllers because the API may not be 100% initialized yet. -- name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" - retries: 3 - delay: 5 - register: result - until: result.rc == 0 - when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. 
- command: > - curl --silent --tlsv1.2 - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false diff --git a/roles/openshift_hosted_metrics/meta/main.yaml b/roles/openshift_hosted_metrics/meta/main.yaml deleted file mode 100644 index debca3ca6..000000000 --- a/roles/openshift_hosted_metrics/meta/main.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -galaxy_info: - author: David MartÃn - description: - company: - license: Apache License, Version 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- { role: openshift_examples } -- { role: openshift_facts } -- { role: openshift_master_facts } diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml deleted file mode 100644 index 15dd1bd54..000000000 --- a/roles/openshift_hosted_metrics/tasks/install.yml +++ /dev/null @@ -1,132 +0,0 @@ ---- - -- name: Test if metrics-deployer service account exists - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace=openshift-infra - get serviceaccount metrics-deployer -o json - register: serviceaccount - changed_when: false - failed_when: false - -- name: Create metrics-deployer Service Account - shell: > - echo {{ metrics_deployer_sa | to_json | quote }} | - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - create -f - - when: serviceaccount.rc == 1 - -- name: Test edit permissions - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}' - register: edit_rolebindings - changed_when: false - -- name: Add edit permission to the openshift-infra project to metrics-deployer SA - command: > - {{ openshift.common.client_binary }} adm - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - policy add-role-to-user edit - system:serviceaccount:openshift-infra:metrics-deployer - when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout" - -- name: Test hawkular view permissions - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}' - register: view_rolebindings - changed_when: false - -- name: Add view permissions to hawkular SA - command: > - {{ openshift.common.client_binary }} adm - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - policy add-role-to-user view - system:serviceaccount:openshift-infra:hawkular - when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings" - -- name: Test cluster-reader permissions - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}' - register: cluster_reader_clusterrolebindings - changed_when: false - -- name: Add 
cluster-reader permission to the openshift-infra project to heapster SA - command: > - {{ openshift.common.client_binary }} adm - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - policy add-cluster-role-to-user cluster-reader - system:serviceaccount:openshift-infra:heapster - when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout" - -- name: Create metrics-deployer secret - command: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - secrets new metrics-deployer nothing=/dev/null - register: metrics_deployer_secret - changed_when: metrics_deployer_secret.rc == 0 - failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr - -# TODO: extend this to allow user passed in certs or generating cert with -# OpenShift CA -- name: Build metrics deployer command - set_fact: - deployer_cmd: "{{ openshift.common.client_binary }} process -f \ - {{ hosted_base }}/metrics-deployer.yaml -v \ - HAWKULAR_METRICS_HOSTNAME={{ g_metrics_hostname }} \ - -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \ - -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \ - -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \ - -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} - {{ image_prefix }} \ - {{ image_version }} \ - -v MODE={{ deployment_mode }} \ - | {{ openshift.common.client_binary }} --namespace openshift-infra \ - --config={{ openshift_hosted_metrics_kubeconfig }} \ - create -o name -f -" - -- name: Deploy Metrics - shell: "{{ deployer_cmd }}" - register: deploy_metrics - failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0" - changed_when: deploy_metrics.rc == 0 - -- set_fact: - deployer_pod: "{{ deploy_metrics.stdout[1:2] }}" - -# TODO: re-enable this once the metrics deployer validation issue is fixed -# when using dynamically provisioned volumes -- name: "Wait for image pull and deployer pod" - shell: > - {{ openshift.common.client_binary }} - --namespace openshift-infra - --config={{ openshift_hosted_metrics_kubeconfig }} - get {{ deploy_metrics.stdout }} - register: deploy_result - until: "{{ 'Completed' in deploy_result.stdout }}" - failed_when: False - retries: 60 - delay: 10 - -- name: Configure master for metrics - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: assetConfig.metricsPublicURL - yaml_value: "{{ openshift_hosted_metrics_deploy_url }}" - notify: restart master diff --git a/roles/openshift_hosted_metrics/tasks/main.yaml b/roles/openshift_hosted_metrics/tasks/main.yaml deleted file mode 100644 index 5ce8aa92b..000000000 --- a/roles/openshift_hosted_metrics/tasks/main.yaml +++ /dev/null @@ -1,75 +0,0 @@ ---- -- name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp - changed_when: False - -- name: Record kubeconfig tmp dir - set_fact: - openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" - -- name: Copy the admin client config(s) - command: > - cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }} - changed_when: False - -- name: Set hosted metrics facts - openshift_facts: - role: hosted - openshift_env: "{{ hostvars - | oo_merge_hostvars(vars, inventory_hostname) - | oo_openshift_env }}" - openshift_env_structures: - - 
'openshift.hosted.metrics.*' - -- set_fact: - metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}" - metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}" - metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}" - image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}" - image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}" - - -- name: Check for existing metrics pods - shell: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get pods -l {{ item }} | grep -q Running - register: metrics_pods_status - with_items: - - metrics-infra=hawkular-metrics - - metrics-infra=heapster - - metrics-infra=hawkular-cassandra - failed_when: false - changed_when: false - -- name: Check for previous deployer - shell: > - {{ openshift.common.client_binary }} - --config={{ openshift_hosted_metrics_kubeconfig }} - --namespace openshift-infra - get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer- - register: metrics_deployer_status - failed_when: false - changed_when: false - -- name: Record current deployment status - set_fact: - greenfield: "{{ not metrics_deployer_status.rc == 0 }}" - failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}" - metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}" - -- name: Set deployment mode - set_fact: - deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}" - -# TODO: handle non greenfield deployments in the future -- include: install.yml - when: greenfield - -- name: Delete temp directory - file: - name: "{{ mktemp.stdout }}" - state: absent - changed_when: False diff --git a/roles/openshift_hosted_metrics/vars/main.yaml b/roles/openshift_hosted_metrics/vars/main.yaml deleted file mode 100644 index 6c207d6ac..000000000 --- a/roles/openshift_hosted_metrics/vars/main.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -hawkular_permission_oc_commands: - - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra - - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster - -metrics_deployer_sa: - apiVersion: v1 - kind: ServiceAccount - metadata: - name: metrics-deployer - secrets: - - name: metrics-deployer - - -hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig - -hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}" - -hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}" - -metrics_upgrade: openshift.hosted.metrics.upgrade | default(False) diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index 239b16427..f9c16ba40 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -26,6 +26,8 @@ r_openshift_loadbalancer_os_firewall_allow: port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp" cond: "{{ r_openshift_lb_use_nuage | bool }}" 
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" + # NOTE # r_openshift_lb_use_nuage_default may be defined external to this role. # openshift_use_nuage, if defined, may affect other roles or play behavior. diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 index 72182fcdd..0343a7eb0 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 @@ -1,11 +1,11 @@ [Unit] -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service [Service] ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer -ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg +ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop openshift_loadbalancer LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }} @@ -14,4 +14,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 497c6e0c5..2f1aa061f 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -28,7 +28,7 @@ openshift_logging_curator_ops_memory_limit: 256Mi openshift_logging_curator_ops_cpu_request: 100m openshift_logging_curator_ops_nodeselector: {} -openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_kibana_hostname: "{{ 'kibana.' ~ openshift_master_default_subdomain }}" openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_memory_limit: 736Mi openshift_logging_kibana_cpu_request: 100m @@ -54,7 +54,7 @@ openshift_logging_kibana_key: "" #for the public facing kibana certs openshift_logging_kibana_ca: "" -openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' 
~ openshift_master_default_subdomain }}" openshift_logging_kibana_ops_cpu_limit: null openshift_logging_kibana_ops_memory_limit: 736Mi openshift_logging_kibana_ops_cpu_request: 100m @@ -109,7 +109,7 @@ openshift_logging_es_config: {} # for exposing es to external (outside of the cluster) clients openshift_logging_es_allow_external: False -openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_es_hostname: "{{ 'es.' ~ openshift_master_default_subdomain }}" #The absolute path on the control node to the cert file to use #for the public facing es certs @@ -145,7 +145,7 @@ openshift_logging_es_ops_nodeselector: {} # for exposing es-ops to external (outside of the cluster) clients openshift_logging_es_ops_allow_external: False -openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ openshift_master_default_subdomain }}" #The absolute path on the control node to the cert file to use #for the public facing es-ops certs @@ -165,7 +165,7 @@ openshift_logging_storage_access_modes: ['ReadWriteOnce'] # mux - secure_forward listener service openshift_logging_mux_allow_external: False openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" -openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain}}" openshift_logging_mux_port: 24284 openshift_logging_mux_cpu_limit: null openshift_logging_mux_memory_limit: 512Mi diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml index 074b72942..e0329ee7c 100644 --- a/roles/openshift_logging/handlers/main.yml +++ b/roles/openshift_logging/handlers/main.yml @@ -1,7 +1,7 @@ --- - name: restart master api systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (not (master_api_service_status_changed | default(false) | bool)) notify: Verify API Server # We retry the controllers because the API may not be 100% initialized yet. 
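The conversions of include: to import_tasks and include_tasks seen throughout this patch (registry, secure, storage, logging, and management task files) follow the Ansible 2.4 split between static and dynamic task includes, which is also why the old "static: no" hints are dropped. A minimal illustrative sketch of the two forms follows; the file and variable names in it are hypothetical and not taken from this patch.

# Hedged sketch only: import_tasks is resolved statically at parse time,
# while include_tasks is resolved at runtime and so honors "when" cheaply.
- name: setup firewall (static include, example)
  import_tasks: firewall_example.yml

- name: configure object storage (dynamic include, example)
  include_tasks: object_storage_example.yml
  when: example_storage_kind | default(none) == 'object'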
@@ -11,7 +11,7 @@ delay: 5 register: result until: result.rc == 0 - when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (not (master_controllers_service_status_changed | default(false) | bool)) - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index f526fd734..082c0128f 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -36,7 +36,7 @@ - top_dir: '{{generated_certs_dir}}' when: not signing_conf_file.stat.exists -- include: procure_server_certs.yaml +- include_tasks: procure_server_certs.yaml loop_control: loop_var: cert_info with_items: @@ -45,7 +45,7 @@ - procure_component: kibana-internal hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}" -- include: procure_server_certs.yaml +- include_tasks: procure_server_certs.yaml loop_control: loop_var: cert_info with_items: @@ -53,14 +53,14 @@ hostnames: "logging-mux, {{openshift_logging_mux_hostname}}" when: openshift_logging_use_mux | bool -- include: procure_shared_key.yaml +- include_tasks: procure_shared_key.yaml loop_control: loop_var: shared_key_info with_items: - procure_component: mux when: openshift_logging_use_mux | bool -- include: procure_server_certs.yaml +- include_tasks: procure_server_certs.yaml loop_control: loop_var: cert_info with_items: @@ -68,7 +68,7 @@ hostnames: "es, {{openshift_logging_es_hostname}}" when: openshift_logging_es_allow_external | bool -- include: procure_server_certs.yaml +- include_tasks: procure_server_certs.yaml loop_control: loop_var: cert_info with_items: @@ -109,7 +109,7 @@ - not ca_cert_srl_file.stat.exists - name: Generate PEM certs - include: generate_pems.yaml component={{node_name}} + include_tasks: generate_pems.yaml component={{node_name}} with_items: - system.logging.fluentd - system.logging.kibana @@ -119,7 +119,7 @@ loop_var: node_name - name: Generate PEM cert for mux - include: generate_pems.yaml component={{node_name}} + include_tasks: generate_pems.yaml component={{node_name}} with_items: - system.logging.mux loop_control: @@ -127,7 +127,7 @@ when: openshift_logging_use_mux | bool - name: Generate PEM cert for Elasticsearch external route - include: generate_pems.yaml component={{node_name}} + include_tasks: generate_pems.yaml component={{node_name}} with_items: - system.logging.es loop_control: @@ -135,7 +135,7 @@ when: openshift_logging_es_allow_external | bool - name: Creating necessary JKS certs - include: generate_jks.yaml + include_tasks: generate_jks.yaml # TODO: make idempotent - name: Generate proxy session diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 2fefdc894..bb8ebec6b 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -52,7 +52,7 @@ changed_when: False check_mode: no -- include: generate_certs.yaml +- include_tasks: generate_certs.yaml vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" @@ -250,7 +250,7 @@ when: - openshift_logging_use_ops | bool -- include: annotate_ops_projects.yaml +- include_tasks: annotate_ops_projects.yaml ## Curator - include_role: @@ -311,4 +311,4 @@ openshift_logging_install_eventrouter | default(false) | bool -- 
include: update_master_config.yaml +- include_tasks: update_master_config.yaml diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 7f8e88036..91db457d1 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -19,11 +19,11 @@ check_mode: no become: no -- include: install_logging.yaml +- include_tasks: install_logging.yaml when: - openshift_logging_install_logging | default(false) | bool -- include: delete_logging.yaml +- include_tasks: delete_logging.yaml when: - not openshift_logging_install_logging | default(false) | bool diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml index 6752fb7f9..d4635aab0 100644 --- a/roles/openshift_logging_curator/meta/main.yaml +++ b/roles/openshift_logging_curator/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index 7ddf57450..e7ef5ff22 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -12,7 +12,7 @@ openshift_logging_curator_image_prefix: "{{ openshift_logging_curator_image_prefix | default(__openshift_logging_curator_image_prefix) }}" openshift_logging_curator_image_version: "{{ openshift_logging_curator_image_version | default(__openshift_logging_curator_image_version) }}" -- include: determine_version.yaml +- include_tasks: determine_version.yaml # allow passing in a tempdir - name: Create temp directory for doing work in diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml index 097270772..6a9a6539c 100644 --- a/roles/openshift_logging_elasticsearch/meta/main.yaml +++ b/roles/openshift_logging_elasticsearch/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 770892d52..8f2050043 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -30,7 +30,7 @@ openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_elasticsearch_image_prefix | default(__openshift_logging_elasticsearch_image_prefix) }}" openshift_logging_elasticsearch_image_version: "{{ openshift_logging_elasticsearch_image_version | default(__openshift_logging_elasticsearch_image_version) }}" -- include: determine_version.yaml +- include_tasks: determine_version.yaml # allow passing in a tempdir - name: Create temp directory for doing work in diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml index b1f93eeb9..96b181d61 100644 --- a/roles/openshift_logging_eventrouter/tasks/main.yaml +++ b/roles/openshift_logging_eventrouter/tasks/main.yaml @@ -12,8 +12,8 @@ openshift_logging_eventrouter_image_prefix: "{{ openshift_logging_eventrouter_image_prefix | default(__openshift_logging_eventrouter_image_prefix) }}" openshift_logging_eventrouter_image_version: "{{ openshift_logging_eventrouter_image_version | default(__openshift_logging_eventrouter_image_version) }}" -- include: "{{ role_path }}/tasks/install_eventrouter.yaml" +- include_tasks: install_eventrouter.yaml when: 
openshift_logging_install_eventrouter | default(false) | bool -- include: "{{ role_path }}/tasks/delete_eventrouter.yaml" +- include_tasks: delete_eventrouter.yaml when: not openshift_logging_install_eventrouter | default(false) | bool diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml index 2003aacb2..89c98204f 100644 --- a/roles/openshift_logging_fluentd/meta/main.yaml +++ b/roles/openshift_logging_fluentd/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index f8683ab75..87eedfb4b 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -47,7 +47,7 @@ openshift_logging_fluentd_image_prefix: "{{ openshift_logging_fluentd_image_prefix | default(__openshift_logging_fluentd_image_prefix) }}" openshift_logging_fluentd_image_version: "{{ openshift_logging_fluentd_image_version | default(__openshift_logging_fluentd_image_version) }}" -- include: determine_version.yaml +- include_tasks: determine_version.yaml # allow passing in a tempdir - name: Create temp directory for doing work in @@ -216,7 +216,7 @@ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}" when: "'--all' in openshift_logging_fluentd_hosts" -- include: label_and_wait.yaml +- include_tasks: label_and_wait.yaml vars: node: "{{ fluentd_host }}" with_items: "{{ openshift_logging_fluentd_hosts }}" diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml index 6cdf7c8f3..899193838 100644 --- a/roles/openshift_logging_kibana/defaults/main.yml +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -10,7 +10,7 @@ openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_cpu_request: 100m openshift_logging_kibana_memory_limit: 736Mi -openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' 
~ openshift_master_default_subdomain) }}" openshift_logging_kibana_es_host: "logging-es" openshift_logging_kibana_es_port: 9200 diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml index 89e08abc0..d97586a37 100644 --- a/roles/openshift_logging_kibana/meta/main.yaml +++ b/roles/openshift_logging_kibana/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 9d99114c5..77bf8042a 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -15,7 +15,7 @@ openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_kibana_proxy_image_prefix | default(__openshift_logging_kibana_proxy_image_prefix) }}" openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_kibana_proxy_image_version | default(__openshift_logging_kibana_proxy_image_version) }}" -- include: determine_version.yaml +- include_tasks: determine_version.yaml # allow passing in a tempdir - name: Create temp directory for doing work in diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index cd15da939..1e6c501bf 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -28,7 +28,7 @@ openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journ openshift_logging_mux_allow_external: False openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" -openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_mux_hostname: "{{ 'mux.' 
~ openshift_master_default_subdomain }}" openshift_logging_mux_port: 24284 # the namespace to use for undefined projects should come first, followed by any # additional namespaces to create by default - users will typically not need to set this diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml index f40beb79d..f271d8d7d 100644 --- a/roles/openshift_logging_mux/meta/main.yaml +++ b/roles/openshift_logging_mux/meta/main.yaml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_openshift +- role: openshift_facts diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 242d92188..68948bce2 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -20,7 +20,7 @@ openshift_logging_mux_image_prefix: "{{ openshift_logging_mux_image_prefix | default(__openshift_logging_mux_image_prefix) }}" openshift_logging_mux_image_version: "{{ openshift_logging_mux_image_version | default(__openshift_logging_mux_image_version) }}" -- include: determine_version.yaml +- include_tasks: determine_version.yaml # allow passing in a tempdir - name: Create temp directory for doing work in diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index 3bade9e8c..f212dba7c 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -3,7 +3,7 @@ # Users, projects, and privileges - name: Run pre-install Management validation checks - include: validate.yml + include_tasks: validate.yml # This creates a service account allowing Container Provider # integration (managing OCP/Origin via MIQ/Management) @@ -18,18 +18,18 @@ display_name: "{{ openshift_management_project_description }}" - name: Create and Authorize Management Accounts - include: accounts.yml + include_tasks: accounts.yml ###################################################################### # STORAGE - Initialize basic storage class - name: Determine the correct NFS host if required - include: storage/nfs_server.yml + include_tasks: storage/nfs_server.yml when: openshift_management_storage_class in ['nfs', 'nfs_external'] #--------------------------------------------------------------------- # * nfs - set up NFS shares on the first master for a proof of concept - name: Create required NFS exports for Management app storage - include: storage/nfs.yml + include_tasks: storage/nfs.yml when: openshift_management_storage_class == 'nfs' #--------------------------------------------------------------------- @@ -56,14 +56,14 @@ ###################################################################### # APPLICATION TEMPLATE - name: Install the Management app and PV templates - include: template.yml + include_tasks: template.yml ###################################################################### # APP & DB Storage # For local/external NFS backed installations - name: "Create the required App and DB PVs using {{ openshift_management_storage_class }}" - include: storage/create_nfs_pvs.yml + include_tasks: storage/create_nfs_pvs.yml when: - openshift_management_storage_class in ['nfs', 'nfs_external'] diff --git a/roles/openshift_management/tasks/storage/storage.yml b/roles/openshift_management/tasks/storage/storage.yml index d8bf7aa3e..a3675b29b 100644 --- a/roles/openshift_management/tasks/storage/storage.yml +++ b/roles/openshift_management/tasks/storage/storage.yml @@ -1,3 +1,3 @@ --- -- include: nfs.yml +- include_tasks: 
nfs.yml when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 8e4a46ebb..38b2fd8b8 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -52,6 +52,8 @@ openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | containerized_svc_dir: "/usr/lib/systemd/system" ha_svc_template_path: "native-cluster" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" + # NOTE # r_openshift_master_*_default may be defined external to this role. # openshift_use_*, if defined, may affect other roles or play behavior. diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 359536202..e6b8b8ac8 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -5,7 +5,6 @@ state: restarted when: - not (master_api_service_status_changed | default(false) | bool) - - openshift.master.cluster_method == 'native' notify: - Verify API Server @@ -18,7 +17,6 @@ until: result.rc == 0 when: - not (master_controllers_service_status_changed | default(false) | bool) - - openshift.master.cluster_method == 'native' - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index a1cda2ad4..bf0cbbf18 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -15,3 +15,4 @@ dependencies: - role: lib_openshift - role: lib_utils - role: lib_os_firewall +- role: openshift_facts diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index e52cd6231..5f4e6df71 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -11,25 +11,6 @@ - openshift_master_oauth_grant_method is defined - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods -# HA Variable Validation -- fail: - msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations" - when: - - openshift.master.ha | bool - - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"]) -- fail: - msg: "openshift_master_cluster_password must be set for multi-master installations" - when: - - openshift.master.ha | bool - - openshift.master.cluster_method == "pacemaker" - - openshift_master_cluster_password is not defined or not openshift_master_cluster_password -- fail: - msg: "Pacemaker based HA is not supported at this time when used with containerized installs" - when: - - openshift.master.ha | bool - - openshift.master.cluster_method == "pacemaker" - - openshift.common.is_containerized | bool - - name: Open up firewall ports import_tasks: firewall.yml @@ -226,7 +207,6 @@ enabled: yes state: started when: - - openshift.master.cluster_method == 'native' - inventory_hostname == openshift_master_hosts[0] register: l_start_result until: not l_start_result | failed @@ -241,14 +221,12 @@ - set_fact: master_api_service_status_changed: "{{ l_start_result | changed }}" when: - - openshift.master.cluster_method == 'native' - inventory_hostname == 
openshift_master_hosts[0] - pause: seconds: 15 when: - openshift.master.ha | bool - - openshift.master.cluster_method == 'native' - name: Start and enable master api all masters systemd: @@ -256,7 +234,6 @@ enabled: yes state: started when: - - openshift.master.cluster_method == 'native' - inventory_hostname != openshift_master_hosts[0] register: l_start_result until: not l_start_result | failed @@ -271,14 +248,12 @@ - set_fact: master_api_service_status_changed: "{{ l_start_result | changed }}" when: - - openshift.master.cluster_method == 'native' - inventory_hostname != openshift_master_hosts[0] # A separate wait is required here for native HA since notifies will # be resolved after all tasks in the role. - include_tasks: check_master_api_is_ready.yml when: - - openshift.master.cluster_method == 'native' - master_api_service_status_changed | bool - name: Start and enable master controller service @@ -286,8 +261,6 @@ name: "{{ openshift.common.service_type }}-master-controllers" enabled: yes state: started - when: - - openshift.master.cluster_method == 'native' register: l_start_result until: not l_start_result | failed retries: 1 @@ -301,30 +274,6 @@ - name: Set fact master_controllers_service_status_changed set_fact: master_controllers_service_status_changed: "{{ l_start_result | changed }}" - when: - - openshift.master.cluster_method == 'native' - -- name: Install cluster packages - package: name=pcs state=present - when: - - openshift.master.cluster_method == 'pacemaker' - - not openshift.common.is_containerized | bool - register: l_install_result - until: l_install_result | success - -- name: Start and enable cluster service - systemd: - name: pcsd - enabled: yes - state: started - when: - - openshift.master.cluster_method == 'pacemaker' - - not openshift.common.is_containerized | bool - -- name: Set the cluster user password - shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster - when: - - l_install_result | changed - name: node bootstrap settings include_tasks: bootstrap.yml diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml index c95f562d0..ca04d2243 100644 --- a/roles/openshift_master/tasks/registry_auth.yml +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -33,7 +33,7 @@ - openshift_docker_alternative_creds | default(False) | bool - oreg_auth_user is defined - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - register: master_oreg_auth_credentials_create + register: master_oreg_auth_credentials_create_alt notify: - restart master api - restart master controllers @@ -45,4 +45,8 @@ when: - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool + - > + (master_oreg_auth_credentials_stat.stat.exists + or oreg_auth_credentials_replace + or master_oreg_auth_credentials_create.changed + or master_oreg_auth_credentials_create_alt.changed) | bool diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml index 23386f11b..450f6d803 100644 --- a/roles/openshift_master/tasks/system_container.yml +++ b/roles/openshift_master/tasks/system_container.yml @@ -1,8 +1,4 @@ --- -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - name: Pre-pull master system container image command: > diff --git 
a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 9d11ed574..ee76413e3 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -25,7 +25,6 @@ state: absent ignore_errors: true when: - - openshift.master.cluster_method == "native" - not l_is_master_system_container | bool # This is the image used for both HA and non-HA clusters: @@ -43,7 +42,6 @@ src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2" dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service" when: - - openshift.master.cluster_method == "native" - not l_is_master_system_container | bool with_items: - api @@ -63,22 +61,17 @@ - api - controllers when: - - openshift.master.cluster_method == "native" - not l_is_master_system_container | bool - name: Preserve Master API Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api register: l_master_api_proxy - when: - - openshift.master.cluster_method == "native" failed_when: false changed_when: false - name: Preserve Master API AWS options command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api register: master_api_aws - when: - - openshift.master.cluster_method == "native" failed_when: false changed_when: false @@ -87,14 +80,11 @@ src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2" dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api backup: true - when: - - openshift.master.cluster_method == "native" notify: - restart master api - name: Restore Master API Proxy Config Options when: - - openshift.master.cluster_method == "native" - l_master_api_proxy.rc == 0 - "'http_proxy' not in openshift.common" - "'https_proxy' not in openshift.common" @@ -105,7 +95,6 @@ - name: Restore Master API AWS Options when: - - openshift.master.cluster_method == "native" - master_api_aws.rc == 0 - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) lineinfile: @@ -117,16 +106,12 @@ - name: Preserve Master Controllers Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers register: master_controllers_proxy - when: - - openshift.master.cluster_method == "native" failed_when: false changed_when: false - name: Preserve Master Controllers AWS options command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers register: master_controllers_aws - when: - - openshift.master.cluster_method == "native" failed_when: false changed_when: false @@ -135,8 +120,6 @@ src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2" dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers backup: true - when: - - openshift.master.cluster_method == "native" notify: - restart master controllers @@ -146,7 +129,6 @@ line: "{{ item }}" with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}" when: - - openshift.master.cluster_method == "native" - master_controllers_proxy.rc == 0 - "'http_proxy' not in openshift.common" - "'https_proxy' not in openshift.common" @@ -157,6 +139,5 @@ line: "{{ item }}" with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}" when: - - openshift.master.cluster_method == "native" - master_controllers_aws.rc == 0 - not (openshift_cloudprovider_kind is 
defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index 5d4a99c97..cec3d3fb1 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -4,9 +4,9 @@ Documentation=https://github.com/openshift/origin After=etcd_container.service Wants=etcd_container.service Before={{ openshift.common.service_type }}-node.service -After={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api @@ -33,5 +33,5 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index f93f3b565..a0248151d 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -3,9 +3,9 @@ Description=Atomic OpenShift Master Controllers Documentation=https://github.com/openshift/origin Wants={{ openshift.common.service_type }}-master-api.service After={{ openshift.common.service_type }}-master-api.service -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers @@ -32,4 +32,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index a0f00e545..92668b227 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -120,7 +120,7 @@ kubernetesMasterConfig: - application/vnd.kubernetes.protobuf {% endif %} controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }} - masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }} + masterCount: {{ openshift.master.master_count }} masterIP: {{ openshift.common.ip }} podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }} proxyClientInfo: @@ -204,7 +204,7 @@ projectConfig: mcsLabelsPerProject: {{ osm_mcs_labels_per_project }} uidAllocatorRange: "{{ osm_uid_allocator_range }}" routingConfig: - 
subdomain: "{{ openshift_master_default_subdomain | default("") }}" + subdomain: "{{ openshift_master_default_subdomain }}" serviceAccountConfig: limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }} managedNames: diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md deleted file mode 100644 index 58dd19ac3..000000000 --- a/roles/openshift_master_cluster/README.md +++ /dev/null @@ -1,34 +0,0 @@ -OpenShift Master Cluster -======================== - -TODO - -Requirements ------------- - -* Ansible 2.2 - -Role Variables --------------- - -TODO - -Dependencies ------------- - -TODO - -Example Playbook ----------------- - -TODO - -License -------- - -Apache License Version 2.0 - -Author Information ------------------- - -Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml deleted file mode 100644 index c452b165e..000000000 --- a/roles/openshift_master_cluster/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Jason DeTiberus - description: - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: [] diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml deleted file mode 100644 index 1b94598dd..000000000 --- a/roles/openshift_master_cluster/tasks/configure.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- fail: - msg: This role requires that openshift_master_cluster_vip is set - when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip -- fail: - msg: This role requires that openshift_master_cluster_public_vip is set - when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip - -- name: Authenticate to the cluster - command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }} - -- name: Create the cluster - command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }} - -- name: Start the cluster - command: pcs cluster start --all - -- name: Enable the cluster on all nodes - command: pcs cluster enable --all - -- name: Set default resource stickiness - command: pcs resource defaults resource-stickiness=100 - -- name: Add the cluster VIP resource - command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master - -- name: Add the cluster public VIP resource - command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master - when: openshift_master_cluster_public_vip != openshift_master_cluster_vip - -- name: Add the cluster master service resource - command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master - -- name: Disable stonith - command: pcs property set stonith-enabled=false - -- name: Wait for the clustered master service to be available - wait_for: - host: "{{ openshift_master_cluster_vip }}" - port: "{{ openshift.master.api_port }}" - state: started - timeout: 180 - delay: 90 diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml deleted file mode 100644 index 
41bfc72cb..000000000 --- a/roles/openshift_master_cluster/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- fail: - msg: "Not possible on atomic hosts for now" - when: openshift.common.is_containerized | bool - -- name: Test if cluster is already configured - command: pcs status - register: pcs_status - changed_when: false - failed_when: false - when: openshift.master.cluster_method == "pacemaker" - -- include_tasks: configure.yml - when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr" diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml index d0dcdae4b..a89f48afa 100644 --- a/roles/openshift_master_facts/defaults/main.yml +++ b/roles/openshift_master_facts/defaults/main.yml @@ -1,5 +1,4 @@ --- -openshift_master_default_subdomain: "router.default.svc.cluster.local" openshift_master_admission_plugin_config: openshift.io/ImagePolicy: configuration: diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py index c827f2d26..ff15f693b 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py @@ -485,31 +485,6 @@ class FilterModule(object): Dumper=AnsibleDumper)) @staticmethod - def validate_pcs_cluster(data, masters=None): - ''' Validates output from "pcs status", ensuring that each master - provided is online. - Ex: data = ('...', - 'PCSD Status:', - 'master1.example.com: Online', - 'master2.example.com: Online', - 'master3.example.com: Online', - '...') - masters = ['master1.example.com', - 'master2.example.com', - 'master3.example.com'] - returns True - ''' - if not issubclass(type(data), string_types): - raise errors.AnsibleFilterError("|failed expects data is a string or unicode") - if not issubclass(type(masters), list): - raise errors.AnsibleFilterError("|failed expects masters is a list") - valid = True - for master in masters: - if "{0}: Online".format(master) not in data: - valid = False - return valid - - @staticmethod def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True): ''' Return certificates to synchronize based on facts. 
''' if not issubclass(type(hostvars), dict): @@ -553,6 +528,5 @@ class FilterModule(object): def filters(self): ''' returns a mapping of filters to methods ''' return {"translate_idps": self.translate_idps, - "validate_pcs_cluster": self.validate_pcs_cluster, "certificates_to_synchronize": self.certificates_to_synchronize, "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file} diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 20cc5358e..0cb87dcaa 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -1,14 +1,8 @@ --- -# Ensure the default sub-domain is set: -- name: Migrate legacy osm_default_subdomain fact - set_fact: - openshift_master_default_subdomain: "{{ osm_default_subdomain | default(None) }}" - when: openshift_master_default_subdomain is not defined - - name: Verify required variables are set fail: msg: openshift_master_default_subdomain must be set to deploy metrics - when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain | default("") == "" + when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain == "" # NOTE: These metrics variables are unfortunately needed by both the master and the metrics roles # to properly configure the master-config.yaml file. @@ -20,7 +14,7 @@ - name: Set g_metrics_hostname set_fact: g_metrics_hostname: "{{ openshift_hosted_metrics_public_url - | default('hawkular-metrics.' ~ (openshift_master_default_subdomain)) + | default('hawkular-metrics.' ~ openshift_master_default_subdomain) | oo_hostname_from_url }}" - set_fact: @@ -31,7 +25,6 @@ openshift_facts: role: master local_facts: - cluster_method: "{{ openshift_master_cluster_method | default('native') }}" cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" api_port: "{{ openshift_master_api_port | default(None) }}" diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml index 074b72942..e0329ee7c 100644 --- a/roles/openshift_metrics/handlers/main.yml +++ b/roles/openshift_metrics/handlers/main.yml @@ -1,7 +1,7 @@ --- - name: restart master api systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (not (master_api_service_status_changed | default(false) | bool)) notify: Verify API Server # We retry the controllers because the API may not be 100% initialized yet. 
@@ -11,7 +11,7 @@ delay: 5 register: result until: result.rc == 0 - when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + when: (not (master_controllers_service_status_changed | default(false) | bool)) - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml index edb854467..1aa7e7079 100644 --- a/roles/openshift_nfs/tasks/setup.yml +++ b/roles/openshift_nfs/tasks/setup.yml @@ -1,7 +1,6 @@ --- - name: setup firewall - include: firewall.yml - static: yes + import_tasks: firewall.yml - name: Install nfs-utils package: name=nfs-utils state=present diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 5a0c09f5c..f3867fe4a 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -101,8 +101,11 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker" oreg_auth_credentials_replace: False l_bind_docker_reg_auth: False +openshift_use_crio: False openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}" +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" + # NOTE # r_openshift_node_*_default may be defined external to this role. # openshift_use_*, if defined, may affect other roles or play behavior. diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 927d107c6..70057c7f3 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -17,7 +17,6 @@ dependencies: - role: lib_openshift - role: lib_os_firewall when: not (openshift_node_upgrade_in_progress | default(False)) -- role: openshift_docker - role: openshift_cloud_provider when: not (openshift_node_upgrade_in_progress | default(False)) - role: lib_utils diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index d46b1f9c3..d9f3e920d 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -4,9 +4,9 @@ when: - (not ansible_selinux or ansible_selinux.status != 'enabled') - deployment_type == 'openshift-enterprise' - - not openshift_use_crio | default(false) + - not openshift_use_crio -- include: dnsmasq.yml +- include_tasks: dnsmasq.yml - name: setup firewall import_tasks: firewall.yml @@ -44,13 +44,6 @@ - name: include node installer include_tasks: install.yml -- name: Restart cri-o - systemd: - name: cri-o - enabled: yes - state: restarted - when: openshift_use_crio | default(false) - - name: restart NetworkManager to ensure resolv.conf is present systemd: name: NetworkManager diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index 73dc9e130..98a391890 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -1,8 +1,4 @@ --- -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - name: Pre-pull node system container image command: > @@ -16,6 +12,6 @@ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ 
openshift.node.node_system_image }}:{{ openshift_image_tag }}" values: - "DNS_DOMAIN={{ openshift.common.dns_domain }}" - - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service" + - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service" - "MASTER_SERVICE={{ openshift.common.service_type }}.service" state: latest diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index 8c3548475..b61bc84c1 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -1,19 +1,11 @@ --- - set_fact: - l_use_crio: "{{ openshift_use_crio | default(false) }}" - -- set_fact: l_service_name: "cri-o" - when: l_use_crio + when: openshift_use_crio - set_fact: - l_service_name: "{{ openshift.docker.service_name }}" - when: not l_use_crio - -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy + l_service_name: "{{ openshift_docker_service_name }}" + when: not openshift_use_crio - name: Pre-pull OpenVSwitch system container image command: > diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml index f5428867a..ab43ec049 100644 --- a/roles/openshift_node/tasks/registry_auth.yml +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -32,7 +32,7 @@ - openshift_docker_alternative_creds | bool - oreg_auth_user is defined - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool - register: node_oreg_auth_credentials_create + register: node_oreg_auth_credentials_create_alt notify: - restart node @@ -43,4 +43,8 @@ when: - openshift.common.is_containerized | bool - oreg_auth_user is defined - - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool + - > + (node_oreg_auth_credentials_stat.stat.exists + or oreg_auth_credentials_replace + or node_oreg_auth_credentials_create.changed + or node_oreg_auth_credentials_create_alt.changed) | bool diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml index a4fa51172..3f1abceab 100644 --- a/roles/openshift_node/tasks/upgrade/restart.yml +++ b/roles/openshift_node/tasks/upgrade/restart.yml @@ -13,19 +13,15 @@ - name: Reload systemd to ensure latest unit files command: systemctl daemon-reload -- name: Restart docker +- name: Restart container runtime service: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: started register: docker_start_result until: not docker_start_result | failed retries: 3 delay: 30 -- name: Update docker facts - openshift_facts: - role: docker - - name: Start services service: name={{ item }} state=started with_items: diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index 7602d8ee6..da751bd65 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -1,14 +1,14 @@ [Unit] Description=OpenShift Node -After={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service Wants=openvswitch.service After=ovsdb-server.service After=ovs-vswitchd.service -Wants={{ openshift.docker.service_name }}.service +Wants={{ openshift_docker_service_name }}.service Documentation=https://github.com/openshift/origin Requires=dnsmasq.service 
After=dnsmasq.service -{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio %}Wants=cri-o.service{% endif %} [Service] Type=notify diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index d452cc45c..16fdde02e 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -16,7 +16,7 @@ imageConfig: latest: {{ openshift_node_image_config_latest }} kind: NodeConfig kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }} -{% if openshift_use_crio | default(False) %} +{% if openshift_use_crio %} container-runtime: - remote container-runtime-endpoint: diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index fa7238849..5964ac095 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -1,9 +1,9 @@ [Unit] -Requires={{ openshift.docker.service_name }}.service -After={{ openshift.docker.service_name }}.service +Requires={{ openshift_docker_service_name }}.service +After={{ openshift_docker_service_name }}.service PartOf={{ openshift.common.service_type }}-node.service Before={{ openshift.common.service_type }}-node.service -{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %} +{% if openshift_use_crio %}Wants=cri-o.service{% endif %} [Service] ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 561aa01f4..3b33ca542 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -1,9 +1,9 @@ [Unit] After={{ openshift.common.service_type }}-master.service -After={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service After=openvswitch.service -PartOf={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service +PartOf={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service {% if openshift_node_use_openshift_sdn %} Wants=openvswitch.service PartOf=openvswitch.service @@ -26,7 +26,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \ -e HOST=/rootfs -e HOST_ETC=/host-etc \ - -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \ + -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \ -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \ {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base 
}}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \ -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \ @@ -48,4 +48,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service index 34aaaabd6..37f091c76 100644 --- a/roles/openshift_node/templates/openvswitch.docker.service +++ b/roles/openshift_node/templates/openvswitch.docker.service @@ -1,7 +1,7 @@ [Unit] -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service +After={{ openshift_docker_service_name }}.service +Requires={{ openshift_docker_service_name }}.service +PartOf={{ openshift_docker_service_name }}.service [Service] EnvironmentFile=/etc/sysconfig/openvswitch @@ -14,4 +14,4 @@ Restart=always RestartSec=5s [Install] -WantedBy={{ openshift.docker.service_name }}.service +WantedBy={{ openshift_docker_service_name }}.service diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml index 455f26f30..b42b75be9 100644 --- a/roles/openshift_node_certificates/defaults/main.yml +++ b/roles/openshift_node_certificates/defaults/main.yml @@ -1,3 +1,5 @@ --- openshift_node_cert_expire_days: 730 openshift_ca_host: '' + +openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml index ef66bf9ca..0686ac101 100644 --- a/roles/openshift_node_certificates/handlers/main.yml +++ b/roles/openshift_node_certificates/handlers/main.yml @@ -6,7 +6,7 @@ - name: check for container runtime after updating ca trust command: > - systemctl -q is-active {{ openshift.docker.service_name }}.service + systemctl -q is-active {{ openshift_docker_service_name }}.service register: l_docker_installed # An rc of 0 indicates that the container runtime service is # running. 
We will restart it by notifying the restart handler since @@ -18,7 +18,7 @@ - name: restart container runtime after updating ca trust systemd: - name: "{{ openshift.docker.service_name }}" + name: "{{ openshift_docker_service_name }}" state: restarted when: not openshift_certificates_redeploy | default(false) | bool register: l_docker_restart_docker_in_cert_result diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml index d33d09980..c234a3000 100644 --- a/roles/openshift_node_facts/tasks/main.yml +++ b/roles/openshift_node_facts/tasks/main.yml @@ -15,7 +15,6 @@ kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" labels: "{{ openshift_node_labels | default(None) }}" registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" - sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" set_node_ip: "{{ openshift_set_node_ip | default(None) }}" node_image: "{{ osn_image | default(None) }}" diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 5f182e0d6..929b76f54 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -4,11 +4,9 @@ openshift_openstack_stack_state: 'present' openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0 openshift_openstack_node_ingress_cidr: 0.0.0.0/0 openshift_openstack_lb_ingress_cidr: 0.0.0.0/0 -openshift_openstack_bastion_ingress_cidr: 0.0.0.0/0 openshift_openstack_num_etcd: 0 openshift_openstack_num_masters: 1 openshift_openstack_num_nodes: 1 -openshift_openstack_num_dns: 0 openshift_openstack_num_infra: 1 openshift_openstack_dns_nameservers: [] openshift_openstack_nodes_to_remove: [] @@ -45,7 +43,6 @@ openshift_openstack_container_storage_setup: # populate-dns openshift_openstack_dns_records_add: [] -openshift_openstack_external_nsupdate_keys: {} openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' 
+ openshift_openstack_public_dns_domain) }}" openshift_openstack_app_subdomain: "apps" @@ -60,20 +57,17 @@ openshift_openstack_infra_hostname: infra-node openshift_openstack_node_hostname: app-node openshift_openstack_lb_hostname: lb openshift_openstack_etcd_hostname: etcd -openshift_openstack_dns_hostname: dns openshift_openstack_keypair_name: openshift openshift_openstack_lb_flavor: "{{ openshift_openstack_default_flavor }}" openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}" openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}" openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}" openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}" -openshift_openstack_dns_flavor: "{{ openshift_openstack_default_flavor }}" openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}" openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}" openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}" openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}" openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}" -openshift_openstack_dns_image: "{{ openshift_openstack_default_image_name }}" openshift_openstack_provider_network_name: null openshift_openstack_external_network_name: null openshift_openstack_private_network: >- @@ -89,8 +83,5 @@ openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_si openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}" openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}" openshift_openstack_etcd_volume_size: 2 -openshift_openstack_dns_volume_size: 1 openshift_openstack_lb_volume_size: 5 -openshift_openstack_use_bastion: false -openshift_openstack_ui_ssh_tunnel: false openshift_openstack_ephemeral_volumes: false diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml index 57c7238d1..30996cc47 100644 --- a/roles/openshift_openstack/tasks/check-prerequisites.yml +++ b/roles/openshift_openstack/tasks/check-prerequisites.yml @@ -32,10 +32,12 @@ command: python -c "import dns" ignore_errors: yes register: pythondns_result + when: openshift_openstack_external_nsupdate_keys is defined - name: Check if python-dns is installed assert: that: 'pythondns_result.rc == 0' msg: "Python module python-dns is not installed" + when: openshift_openstack_external_nsupdate_keys is defined # Check jinja2 - name: Try to import jinja2 module @@ -85,21 +87,19 @@ msg: "Keypair {{ openshift_openstack_keypair_name }} is not available" # Check that custom images are available -- include: custom_image_check.yaml +- include_tasks: custom_image_check.yaml with_items: - "{{ openshift_openstack_master_image }}" - "{{ openshift_openstack_infra_image }}" - "{{ openshift_openstack_node_image }}" - "{{ openshift_openstack_lb_image }}" - "{{ openshift_openstack_etcd_image }}" - - "{{ openshift_openstack_dns_image }}" # Check that custom flavors are available -- include: custom_flavor_check.yaml +- include_tasks: custom_flavor_check.yaml with_items: - "{{ openshift_openstack_master_flavor }}" - "{{ openshift_openstack_infra_flavor }}" - "{{ openshift_openstack_node_flavor }}" - "{{ openshift_openstack_lb_flavor }}" - "{{ openshift_openstack_etcd_flavor }}" - - "{{ openshift_openstack_dns_flavor }}" diff --git a/roles/openshift_openstack/tasks/hostname.yml 
b/roles/openshift_openstack/tasks/hostname.yml deleted file mode 100644 index e1a18425f..000000000 --- a/roles/openshift_openstack/tasks/hostname.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Setting Hostname Fact - set_fact: - new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}" - -- name: Setting FQDN Fact - set_fact: - new_fqdn: "{{ new_hostname }}.{{ openshift_openstack_full_dns_domain }}" - -- name: Setting hostname and DNS domain - hostname: name="{{ new_fqdn }}" - -- name: Check for cloud.cfg - stat: path=/etc/cloud/cloud.cfg - register: cloud_cfg - -- name: Prevent cloud-init updates of hostname/fqdn (if applicable) - lineinfile: - dest: /etc/cloud/cloud.cfg - state: present - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - with_items: - - { regexp: '^ - set_hostname', line: '# - set_hostname' } - - { regexp: '^ - update_hostname', line: '# - update_hostname' } - when: cloud_cfg.stat.exists == True diff --git a/roles/openshift_openstack/tasks/node-configuration.yml b/roles/openshift_openstack/tasks/node-configuration.yml index 89e58d830..59df2e396 100644 --- a/roles/openshift_openstack/tasks/node-configuration.yml +++ b/roles/openshift_openstack/tasks/node-configuration.yml @@ -4,8 +4,6 @@ msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'" when: ansible_selinux.config_mode != "enforcing" -- include: hostname.yml +- include_tasks: container-storage-setup.yml -- include: container-storage-setup.yml - -- include: node-network.yml +- include_tasks: node-network.yml diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index c03aceb94..eae4967f7 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -30,7 +30,6 @@ nsupdate_key_algorithm_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] }}" nsupdate_private_key_name: "{{ openshift_openstack_external_nsupdate_keys['private']['key_name']|default('private-' + openshift_openstack_full_dns_domain) }}" when: - - openshift_openstack_external_nsupdate_keys is defined - openshift_openstack_external_nsupdate_keys['private'] is defined @@ -44,6 +43,8 @@ key_secret: "{{ nsupdate_key_secret_private }}" key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}" entries: "{{ private_records }}" + when: + - openshift_openstack_external_nsupdate_keys['private'] is defined - name: "Generate list of public A records" set_fact: @@ -63,15 +64,6 @@ when: - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - openshift_openstack_num_masters == 1 - - not openshift_openstack_use_bastion|bool - -- name: "Add public master cluster hostname records to the public A records (single master behind a bastion)" - set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': (hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace(openshift_openstack_full_dns_domain, ''))[:-1], 'ip': hostvars[groups.bastions[0]].public_v4 } ] }}" - when: - - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined - - openshift_openstack_num_masters == 1 - - openshift_openstack_use_bastion|bool - name: "Add public master cluster hostname records to the public A records (multi-master)" set_fact: @@ -87,7 +79,6 @@ nsupdate_key_algorithm_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] }}" nsupdate_public_key_name: 
"{{ openshift_openstack_external_nsupdate_keys['public']['key_name']|default('public-' + openshift_openstack_full_dns_domain) }}" when: - - openshift_openstack_external_nsupdate_keys is defined - openshift_openstack_external_nsupdate_keys['public'] is defined - name: "Generate the public Add section for DNS" @@ -100,11 +91,13 @@ key_secret: "{{ nsupdate_key_secret_public }}" key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}" entries: "{{ public_records }}" + when: + - openshift_openstack_external_nsupdate_keys['public'] is defined - name: "Generate the final openshift_openstack_dns_records_add" set_fact: - openshift_openstack_dns_records_add: "{{ private_named_records + public_named_records }}" + openshift_openstack_dns_records_add: "{{ private_named_records|default([]) + public_named_records|default([]) }}" - name: "Add DNS A records" @@ -120,7 +113,7 @@ # TODO(shadower): add a cleanup playbook that removes these records, too! state: present with_subelements: - - "{{ openshift_openstack_dns_records_add | default({}) }}" + - "{{ openshift_openstack_dns_records_add | default([]) }}" - entries register: nsupdate_add_result until: nsupdate_add_result|succeeded diff --git a/roles/openshift_openstack/tasks/provision.yml b/roles/openshift_openstack/tasks/provision.yml index dccbe334c..b774bd620 100644 --- a/roles/openshift_openstack/tasks/provision.yml +++ b/roles/openshift_openstack/tasks/provision.yml @@ -1,6 +1,6 @@ --- - name: Generate the templates - include: generate-templates.yml + include_tasks: generate-templates.yml when: - openshift_openstack_stack_state == 'present' @@ -17,7 +17,7 @@ meta: refresh_inventory - name: CleanUp - include: cleanup.yml + include_tasks: cleanup.yml when: - openshift_openstack_stack_state == 'present' diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 index 0e7538629..8d13eb81e 100644 --- a/roles/openshift_openstack/templates/heat_stack.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -54,25 +54,8 @@ outputs: description: Floating IPs of the nodes value: { get_attr: [ infra_nodes, floating_ip ] } -{% if openshift_openstack_num_dns|int > 0 %} - dns_name: - description: Name of the DNS - value: - get_attr: - - dns - - name - - dns_floating_ips: - description: Floating IPs of the DNS - value: { get_attr: [ dns, floating_ip ] } - - dns_private_ips: - description: Private IPs of the DNS - value: { get_attr: [ dns, private_ip ] } -{% endif %} - conditions: - no_floating: {% if openshift_openstack_provider_network_name or openshift_openstack_use_bastion|bool %}true{% else %}false{% endif %} + no_floating: {% if openshift_openstack_provider_network_name %}true{% else %}false{% endif %} resources: @@ -180,13 +163,6 @@ resources: port_range_min: 22 port_range_max: 22 remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} -{% if openshift_openstack_use_bastion|bool %} - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: {{ openshift_openstack_bastion_ingress_cidr }} -{% endif %} - direction: ingress protocol: icmp remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} @@ -443,44 +419,7 @@ resources: port_range_min: 443 port_range_max: 443 -{% if openshift_openstack_num_dns|int > 0 %} - dns-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-dns-secgrp - params: - cluster_id: {{ openshift_openstack_stack_name }} - description: - 
str_replace: - template: Security group for cluster_id cluster DNS - params: - cluster_id: {{ openshift_openstack_stack_name }} - rules: - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: {{ openshift_openstack_node_ingress_cidr }} - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - remote_ip_prefix: "{{ openshift_openstack_subnet_prefix }}.0/24" -{% endif %} - -{% if openshift_openstack_num_masters|int > 1 or openshift_openstack_ui_ssh_tunnel|bool %} +{% if openshift_openstack_num_masters|int > 1 %} lb-secgrp: type: OS::Neutron::SecurityGroup properties: @@ -491,20 +430,13 @@ resources: protocol: tcp port_range_min: {{ openshift_master_api_port | default(8443) }} port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }} -{% if openshift_openstack_ui_ssh_tunnel|bool %} - - direction: ingress - protocol: tcp - port_range_min: {{ openshift_master_api_port | default(8443) }} - port_range_max: {{ openshift_master_api_port | default(8443) }} - remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }} -{% endif %} + remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }} {% if openshift_master_console_port is defined and openshift_master_console_port != openshift_master_api_port %} - direction: ingress protocol: tcp port_range_min: {{ openshift_master_console_port | default(8443) }} port_range_max: {{ openshift_master_console_port | default(8443) }} - remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr | default(openshift_openstack_bastion_ingress_cidr) }} + remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }} {% endif %} {% endif %} @@ -553,7 +485,7 @@ resources: - no_floating - null - {{ openshift_openstack_external_network_name }} -{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} +{% if openshift_openstack_provider_network_name %} attach_float_net: false {% endif %} volume_size: {{ openshift_openstack_etcd_volume_size }} @@ -685,7 +617,7 @@ resources: - no_floating - null - {{ openshift_openstack_external_network_name }} -{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} +{% if openshift_openstack_provider_network_name %} attach_float_net: false {% endif %} volume_size: {{ openshift_openstack_master_volume_size }} @@ -755,7 +687,7 @@ resources: - no_floating - null - {{ openshift_openstack_external_network_name }} -{% if openshift_openstack_use_bastion|bool or openshift_openstack_provider_network_name %} +{% if openshift_openstack_provider_network_name %} attach_float_net: false {% endif %} volume_size: {{ openshift_openstack_node_volume_size }} @@ -818,9 +750,6 @@ resources: {% else %} - { get_resource: node-secgrp } {% endif %} -{% if openshift_openstack_ui_ssh_tunnel|bool and openshift_openstack_num_masters|int < 2 %} - - { get_resource: lb-secgrp } -{% endif %} - { get_resource: infra-secgrp } - { get_resource: common-secgrp } {% if not openshift_openstack_provider_network_name %} @@ -835,54 +764,3 @@ resources: depends_on: - interface {% endif %} - -{% if openshift_openstack_num_dns|int > 0 %} - dns: - 
type: OS::Heat::ResourceGroup - properties: - count: {{ openshift_openstack_num_dns }} - resource_def: - type: server.yaml - properties: - name: - str_replace: - template: k8s_type-%index%.cluster_id - params: - cluster_id: {{ openshift_openstack_stack_name }} - k8s_type: {{ openshift_openstack_dns_hostname }} - cluster_env: {{ openshift_openstack_public_dns_domain }} - cluster_id: {{ openshift_openstack_stack_name }} - group: - str_replace: - template: k8s_type.cluster_id - params: - k8s_type: dns - cluster_id: {{ openshift_openstack_stack_name }} - type: dns - image: {{ openshift_openstack_dns_image }} - flavor: {{ openshift_openstack_dns_flavor }} - key_name: {{ openshift_openstack_keypair_name }} -{% if openshift_openstack_provider_network_name %} - net: {{ openshift_openstack_provider_network_name }} - net_name: {{ openshift_openstack_provider_network_name }} -{% else %} - net: { get_resource: net } - subnet: { get_resource: subnet } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: {{ openshift_openstack_stack_name }} -{% endif %} - secgrp: - - { get_resource: dns-secgrp } - - { get_resource: common-secgrp } -{% if not openshift_openstack_provider_network_name %} - floating_network: {{ openshift_openstack_external_network_name }} -{% endif %} - volume_size: {{ openshift_openstack_dns_volume_size }} -{% if not openshift_openstack_provider_network_name %} - depends_on: - - interface -{% endif %} -{% endif %} diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py new file mode 100644 index 000000000..eb13a58ba --- /dev/null +++ b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py @@ -0,0 +1,157 @@ +""" +Ansible action plugin to generate pv and pvc dictionary lists +""" + +from ansible.plugins.action import ActionBase +from ansible import errors + + +class ActionModule(ActionBase): + """Action plugin to generate persistent volume and persistent volume claim lists.""" + + def get_templated(self, var_to_template): + """Return a properly templated ansible variable""" + return self._templar.template(self.task_vars.get(var_to_template)) + + def build_common(self, varname=None): + """Retrieve common variables for each pv and pvc type""" + volume = self.get_templated(str(varname) + '_volume_name') + size = self.get_templated(str(varname) + '_volume_size') + labels = self.task_vars.get(str(varname) + '_labels') + if labels: + labels = self._templar.template(labels) + else: + labels = dict() + access_modes = self.get_templated(str(varname) + '_access_modes') + return (volume, size, labels, access_modes) + + def build_pv_nfs(self, varname=None): + """Build pv dictionary for nfs storage type""" + host = self.task_vars.get(str(varname) + '_host') + if host: + self._templar.template(host) + elif host is None: + groups = self.task_vars.get('groups') + default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group') + if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0: + host = groups['oo_nfs_to_config'][0] + else: + raise errors.AnsibleModuleError("|failed no storage host detected") + volume, size, labels, access_modes = self.build_common(varname=varname) + directory = self.get_templated(str(varname) + '_nfs_directory') + path = directory + '/' + volume + return dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + nfs=dict( + server=host, + path=path))) + + def build_pv_openstack(self, varname=None): + """Build pv dictionary for openstack storage type""" + volume, size, labels, access_modes = self.build_common(varname=varname) + filesystem = self.get_templated(str(varname) + '_openstack_filesystem') + volume_id = self.get_templated(str(varname) + '_openstack_volumeID') + return dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + cinder=dict( + fsType=filesystem, + volumeID=volume_id))) + + def build_pv_glusterfs(self, varname=None): + """Build pv dictionary for glusterfs storage type""" + volume, size, labels, access_modes = self.build_common(varname=varname) + endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints') + path = self.get_templated(str(varname) + '_glusterfs_path') + read_only = self.get_templated(str(varname) + '_glusterfs_readOnly') + return dict( + name="{0}-volume".format(volume), + capacity=size, + labels=labels, + access_modes=access_modes, + storage=dict( + glusterfs=dict( + endpoints=endpoints, + path=path, + readOnly=read_only))) + + def build_pv_dict(self, varname=None): + """Check for the existence of PV variables""" + kind = self.task_vars.get(str(varname) + '_kind') + if kind: + kind = self._templar.template(kind) + create_pv = self.task_vars.get(str(varname) + '_create_pv') + if create_pv and self._templar.template(create_pv): + if kind == 'nfs': + return self.build_pv_nfs(varname=varname) + + elif kind == 'openstack': + return self.build_pv_openstack(varname=varname) + + elif kind == 'glusterfs': + return self.build_pv_glusterfs(varname=varname) + + elif not (kind == 'object' or kind == 'dynamic'): + msg = "|failed invalid storage kind '{0}' for component '{1}'".format( + kind, + varname) + raise errors.AnsibleModuleError(msg) + return None + + def build_pvc_dict(self, varname=None): + """Check for the existence of PVC variables""" + kind = self.task_vars.get(str(varname) + '_kind') + if kind: + kind = self._templar.template(kind) + create_pv = self.task_vars.get(str(varname) + '_create_pv') + if create_pv: + create_pv = self._templar.template(create_pv) + create_pvc = self.task_vars.get(str(varname) + '_create_pvc') + if create_pvc: + create_pvc = self._templar.template(create_pvc) + if kind != 'object' and create_pv and create_pvc: + volume, size, _, access_modes = self.build_common(varname=varname) + return dict( + name="{0}-claim".format(volume), + capacity=size, + access_modes=access_modes) + return None + + def run(self, tmp=None, task_vars=None): + """Run generate_pv_pvcs_list action plugin""" + result = super(ActionModule, self).run(tmp, task_vars) + # Ignore setting self.task_vars outside of init.
+ # pylint: disable=W0201 + self.task_vars = task_vars or {} + + result["changed"] = False + result["failed"] = False + result["msg"] = "persistent_volumes list and persistent_volume_claims list created" + vars_to_check = ['openshift_hosted_registry_storage', + 'openshift_hosted_router_storage', + 'openshift_hosted_etcd_storage', + 'openshift_logging_storage', + 'openshift_loggingops_storage', + 'openshift_metrics_storage', + 'openshift_prometheus_storage', + 'openshift_prometheus_alertmanager_storage', + 'openshift_prometheus_alertbuffer_storage'] + persistent_volumes = [] + persistent_volume_claims = [] + for varname in vars_to_check: + pv_dict = self.build_pv_dict(varname) + if pv_dict: + persistent_volumes.append(pv_dict) + pvc_dict = self.build_pvc_dict(varname) + if pvc_dict: + persistent_volume_claims.append(pvc_dict) + result["persistent_volumes"] = persistent_volumes + result["persistent_volume_claims"] = persistent_volume_claims + return result diff --git a/roles/openshift_persistent_volumes/defaults/main.yml b/roles/openshift_persistent_volumes/defaults/main.yml new file mode 100644 index 000000000..b16e164e6 --- /dev/null +++ b/roles/openshift_persistent_volumes/defaults/main.yml @@ -0,0 +1,9 @@ +--- + +openshift_persistent_volumes_default_nfs_group: 'oo_nfs_to_config' + +openshift_persistent_volume_extras: [] +openshift_persistent_volume_claims_extras: [] + +glusterfs_pv: [] +glusterfs_pvc: [] diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml index 19e9a56b7..48b0699ab 100644 --- a/roles/openshift_persistent_volumes/meta/main.yml +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -9,4 +9,5 @@ galaxy_info: - name: EL versions: - 7 -dependencies: {} +dependencies: +- role: openshift_facts diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml index e431e978c..0b4dd7d1f 100644 --- a/roles/openshift_persistent_volumes/tasks/main.yml +++ b/roles/openshift_persistent_volumes/tasks/main.yml @@ -9,39 +9,36 @@ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig changed_when: False -- name: Deploy PersistentVolume definitions - template: - dest: "{{ mktemp.stdout }}/persistent-volumes.yml" - src: persistent-volume.yml.j2 - when: persistent_volumes | length > 0 - changed_when: False +- set_fact: + glusterfs_pv: + - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-volume" + capacity: "{{ openshift_hosted_registry_storage_volume_size }}" + access_modes: "{{ openshift_hosted_registry_storage_access_modes }}" + storage: + glusterfs: + endpoints: "{{ openshift_hosted_registry_storage_glusterfs_endpoints }}" + path: "{{ openshift_hosted_registry_storage_glusterfs_path }}" + readOnly: "{{ openshift_hosted_registry_storage_glusterfs_readOnly }}" + glusterfs_pvc: + - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim" + capacity: "{{ openshift_hosted_registry_storage_volume_size }}" + access_modes: "{{ openshift_hosted_registry_storage_access_modes }}" + when: openshift_hosted_registry_storage_glusterfs_swap | default(False) -- name: Create PersistentVolumes - command: > - {{ openshift.common.client_binary }} create - -f {{ mktemp.stdout }}/persistent-volumes.yml - --config={{ mktemp.stdout }}/admin.kubeconfig - register: pv_create_output - when: persistent_volumes | length > 0 - failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) 
- changed_when: ('created' in pv_create_output.stdout) +- name: create standard pv and pvc lists + # generate_pv_pvcs_list is a custom action module defined in ../action_plugins + generate_pv_pvcs_list: {} + register: l_pv_pvcs_list -- name: Deploy PersistentVolumeClaim definitions - template: - dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml" - src: persistent-volume-claim.yml.j2 - when: persistent_volume_claims | length > 0 - changed_when: False +- include_tasks: pv.yml + vars: + l_extra_persistent_volumes: "{{ openshift_persistent_volume_extras | union(glusterfs_pv) }}" + persistent_volumes: "{{ l_pv_pvcs_list.persistent_volumes | union(l_extra_persistent_volumes) }}" -- name: Create PersistentVolumeClaims - command: > - {{ openshift.common.client_binary }} create - -f {{ mktemp.stdout }}/persistent-volume-claims.yml - --config={{ mktemp.stdout }}/admin.kubeconfig - register: pvc_create_output - when: persistent_volume_claims | length > 0 - failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) - changed_when: ('created' in pvc_create_output.stdout) +- include_tasks: pvc.yml + vars: + l_extra_persistent_volume_claims: "{{ openshift_persistent_volume_claims_extras | union(glusterfs_pvc) }}" + persistent_volume_claims: "{{ l_pv_pvcs_list.persistent_volume_claims | union(l_extra_persistent_volume_claims) }}" - name: Delete temp directory file: diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml new file mode 100644 index 000000000..346605ff7 --- /dev/null +++ b/roles/openshift_persistent_volumes/tasks/pv.yml @@ -0,0 +1,17 @@ +--- +- name: Deploy PersistentVolume definitions + template: + dest: "{{ mktemp.stdout }}/persistent-volumes.yml" + src: persistent-volume.yml.j2 + when: persistent_volumes | length > 0 + changed_when: False + +- name: Create PersistentVolumes + command: > + {{ openshift.common.client_binary }} create + -f {{ mktemp.stdout }}/persistent-volumes.yml + --config={{ mktemp.stdout }}/admin.kubeconfig + register: pv_create_output + when: persistent_volumes | length > 0 + failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) + changed_when: ('created' in pv_create_output.stdout) diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml new file mode 100644 index 000000000..e44f9b18f --- /dev/null +++ b/roles/openshift_persistent_volumes/tasks/pvc.yml @@ -0,0 +1,17 @@ +--- +- name: Deploy PersistentVolumeClaim definitions + template: + dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml" + src: persistent-volume-claim.yml.j2 + when: persistent_volume_claims | length > 0 + changed_when: False + +- name: Create PersistentVolumeClaims + command: > + {{ openshift.common.client_binary }} create + -f {{ mktemp.stdout }}/persistent-volume-claims.yml + --config={{ mktemp.stdout }}/admin.kubeconfig + register: pvc_create_output + when: persistent_volume_claims | length > 0 + failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) + changed_when: ('created' in pvc_create_output.stdout) diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 index ee9dac7cb..9ec14208b 100644 --- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 +++ 
b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 @@ -17,5 +17,5 @@ items: capacity: storage: "{{ volume.capacity }}" accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }} - {{ volume.storage.keys()[0] }}: {{ volume.storage[volume.storage.keys()[0]] | to_padded_yaml(3, 2) }} + {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }} {% endfor %} diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index 814d6ff28..b7b3c0db2 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -45,7 +45,7 @@ openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}" openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}" -openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}" +openshift_storage_glusterfs_registry_namespace: "{{ openshift_hosted_registry_namespace | default(openshift_storage_glusterfs_namespace) }}" openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}" openshift_storage_glusterfs_registry_name: 'registry' openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index 4b33e91b4..315bc5614 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -82,7 +82,7 @@ retries: "{{ (glusterfs_timeout | int / 10) | int }}" when: glusterfs_heketi_wipe -- include: glusterfs_deploy.yml +- include_tasks: glusterfs_deploy.yml when: glusterfs_is_native - name: Create heketi service account @@ -212,7 +212,7 @@ when: - glusterfs_heketi_is_native -- include: heketi_deploy_part1.yml +- include_tasks: heketi_deploy_part1.yml when: - glusterfs_heketi_is_native - glusterfs_heketi_deploy_is_missing @@ -256,7 +256,7 @@ when: - glusterfs_heketi_topology_load -- include: heketi_deploy_part2.yml +- include_tasks: heketi_deploy_part2.yml when: - glusterfs_heketi_is_native - glusterfs_heketi_is_missing @@ -312,8 +312,8 @@ when: - glusterfs_storageclass or glusterfs_s3_deploy -- include: glusterblock_deploy.yml +- include_tasks: glusterblock_deploy.yml when: glusterfs_block_deploy -- include: gluster_s3_deploy.yml +- include_tasks: gluster_s3_deploy.yml when: glusterfs_s3_deploy diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index 71c1311cd..73b9791eb 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -46,4 +46,4 @@ glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_heketi_fstab }}" glusterfs_nodes: "{{ groups.glusterfs | default([]) }}" -- include: glusterfs_common.yml +- include_tasks: glusterfs_common.yml diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index d3cba61cf..7466702b8 100644 
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index d3cba61cf..7466702b8 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -46,7 +46,7 @@
     glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}"
     glusterfs_nodes: "{% if groups.glusterfs_registry is defined %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}"
 
-- include: glusterfs_common.yml
+- include_tasks: glusterfs_common.yml
   when:
   - glusterfs_nodes | default([]) | count > 0
   - "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"
@@ -56,5 +56,5 @@
   register: registry_volume
 
 - name: Create GlusterFS registry volume
-  command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
-  when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
+  command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift_hosted_registry_storage_volume_size | replace('Gi','') }} --name={{ openshift_hosted_registry_storage_glusterfs_path }}"
+  when: "openshift_hosted_registry_storage_glusterfs_path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index d2d8c6c10..b48bfc88e 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -5,13 +5,15 @@
   changed_when: False
   check_mode: no
 
-- include: glusterfs_config.yml
+- include_tasks: glusterfs_config.yml
   when:
   - groups.glusterfs | default([]) | count > 0
 
-- include: glusterfs_registry.yml
-  when:
-  - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
+- include_tasks: glusterfs_registry.yml
+  when: >
+    groups.glusterfs_registry | default([]) | count > 0
+    or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
+    or (openshift_hosted_registry_storage_glusterfs_swap | default(False))
 
 - name: Delete temp directory
   file:
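The folded `when: >` form above joins the three registry-storage conditions into a single expression and reads the flat openshift_hosted_* variables through default(), so an undefined variable no longer aborts the play. A self-contained sketch of the same construct, with a stand-in debug task rather than the real include, and no assumptions about actual default values:

---
# Illustrative only: the task and message are placeholders; the expression
# mirrors the guard above, with default() protecting undefined variables.
- name: Stand-in for the registry storage include
  debug:
    msg: "glusterfs registry storage would be configured here"
  when: >
    groups.glusterfs_registry | default([]) | count > 0
    or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
    or (openshift_hosted_registry_storage_glusterfs_swap | default(False))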
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index c25cad74c..55e4024ec 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -20,25 +20,25 @@
 - name: Ensure exports directory exists
   file:
-    path: "{{ openshift.hosted.registry.storage.nfs.directory }}"
+    path: "{{ openshift_hosted_registry_storage_nfs_directory }}"
     state: directory
 
 - name: Ensure export directories exist
   file:
-    path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}"
+    path: "{{ item }}"
     state: directory
     mode: 0777
     owner: nfsnobody
     group: nfsnobody
   with_items:
-  - "{{ openshift.hosted.registry }}"
-  - "{{ openshift.metrics }}"
-  - "{{ openshift.logging }}"
-  - "{{ openshift.loggingops }}"
-  - "{{ openshift.hosted.etcd }}"
-  - "{{ openshift.prometheus }}"
-  - "{{ openshift.prometheus.alertmanager }}"
-  - "{{ openshift.prometheus.alertbuffer }}"
+  - "{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}"
+  - "{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }}"
+  - "{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }}"
+  - "{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }}"
+  - "{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }}"
+  - "{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }}"
+  - "{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }}"
+  - "{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}"
 
 - name: Configure exports
   template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index c2a741035..2ec8db019 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,8 +1,8 @@
-{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
-{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
-{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
-{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
-{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
-{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
-{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
+{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }} {{ openshift_hosted_registry_storage_nfs_options }}
+{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }} {{ openshift_metrics_storage_nfs_options }}
+{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }} {{ openshift_logging_storage_nfs_options }}
+{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }} {{ openshift_loggingops_storage_nfs_options }}
+{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }} {{ openshift_hosted_etcd_storage_nfs_options }}
+{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }} {{ openshift_prometheus_storage_nfs_options }}
+{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }} {{ openshift_prometheus_alertmanager_storage_nfs_options }}
+{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }} {{ openshift_prometheus_alertbuffer_storage_nfs_options }}
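Both the directory-creation loop and exports.j2 above now read flat per-component variables instead of walking the openshift fact tree. A sketch of how those pairs might be supplied, for example in group_vars, with purely illustrative values rather than the role defaults:

---
# Illustrative values only; each component supplies a *_storage_nfs_directory,
# *_storage_volume_name and *_storage_nfs_options triple, and the role joins
# them as "<directory>/<volume>" plus the export options.
openshift_hosted_registry_storage_nfs_directory: /exports
openshift_hosted_registry_storage_volume_name: registry
openshift_hosted_registry_storage_nfs_options: "*(rw,root_squash)"
openshift_metrics_storage_nfs_directory: /exports
openshift_metrics_storage_volume_name: metrics
openshift_metrics_storage_nfs_options: "*(rw,root_squash)"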
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index 49dd657b5..c8e7b6d7c 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -20,7 +20,7 @@
   file: path={{osnl_mount_dir}}/{{ item }} owner=nfsnobody group=nfsnobody mode=0700
   with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d
 
-- include: nfs.yml
+- include_tasks: nfs.yml
 
 - name: Create volume json file
   template: src=../templates/nfs.json.j2 dest=/root/persistent-volume.{{ item }}.json
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 38b398343..5d7683120 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -12,7 +12,4 @@ galaxy_info:
   categories:
   - cloud
 dependencies:
-- role: openshift_docker_facts
-- role: docker
-  when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
 - role: lib_utils
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index 574e89899..71f957b78 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,7 +1,4 @@
 ---
-- set_fact:
-    l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
-
 - name: Set containerized version to configure if openshift_image_tag specified
   set_fact:
     # Expects a leading "v" in inventory, strip it off here unless
@@ -24,7 +21,7 @@
   register: cli_image_version
   when:
   - openshift_version is not defined
-  - not l_use_crio_only
+  - not openshift_use_crio_only
 
 # Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
 - set_fact:
@@ -33,7 +30,7 @@
   - openshift_version is not defined
   - openshift.common.deployment_type == 'origin'
   - cli_image_version.stdout_lines[0].split('-') | length > 1
-  - not l_use_crio_only
+  - not openshift_use_crio_only
 
 - set_fact:
     openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
@@ -48,14 +45,14 @@
   when:
   - openshift_version is defined
   - openshift_version.split('.') | length == 2
-  - not l_use_crio_only
+  - not openshift_use_crio_only
 
 - set_fact:
     openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
   when:
   - openshift_version is defined
   - openshift_version.split('.') | length == 2
-  - not l_use_crio_only
+  - not openshift_use_crio_only
 
 # TODO: figure out a way to check for the openshift_version when using CRI-O.
 # We should do that using the images in the ostree storage so we don't have
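With the l_use_crio_only intermediate fact removed, the conditions above reference openshift_use_crio_only directly; this assumes the variable is defined elsewhere, for example in role defaults or the inventory. A minimal sketch of the guard, using a placeholder task and an illustrative default() that the real file does not apply:

---
# Placeholder task: stands in for the docker-based image version detection,
# which should be skipped when only CRI-O is in use.
- name: Stand-in for CLI-image version detection
  debug:
    msg: "would inspect the CLI image with docker here"
  when:
  - openshift_version is not defined
  - not (openshift_use_crio_only | default(false))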
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 9ca49b569..f83cf9157 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -69,7 +69,7 @@
   until: subscribe_pool | succeeded
   when: openshift_pool_id.stdout != ''
 
-- include: enterprise.yml
+- include_tasks: enterprise.yml
   when:
   - deployment_type == 'openshift-enterprise'
   - not ostree_booted.stat.exists | bool
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 1e2af2c61..dda8eb4c6 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -125,7 +125,6 @@ def write_inventory_vars(base_inventory, lb):
         base_inventory.write('openshift_override_hostname_check=true\n')
 
     if lb is not None:
-        base_inventory.write('openshift_master_cluster_method=native\n')
         base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
         base_inventory.write(
             "openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
@@ -266,7 +265,6 @@ def default_facts(hosts, verbose=False):
     facts_env = os.environ.copy()
     facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
     facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
-    facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
     if 'ansible_log_path' in CFG.settings:
         facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
     if 'ansible_config' in CFG.settings:
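The quick installer above stops writing openshift_master_cluster_method=native, presumably because the native HA method is now assumed by the playbooks. Under that assumption, a hand-maintained inventory fragment only needs the two cluster hostnames when a load balancer fronts the masters; the hostnames below are placeholders:

---
# Example group_vars fragment; lb.example.com and openshift.example.com are
# placeholder hostnames, and the grouping is a convention, not something this
# patch introduces.
openshift_master_cluster_hostname: lb.example.com
openshift_master_cluster_public_hostname: openshift.example.com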