83 files changed, 443 insertions, 1292 deletions
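The dominant change in this commit is mechanical: the nested `openshift.hosted.*`, `openshift.logging.*`, `openshift.metrics.*`, and `openshift.prometheus.*` fact trees (plus the `oo_persistent_volumes` / `oo_persistent_volume_claims` filter plugins that consumed them) are replaced by flat role defaults that can be overridden directly from the inventory. A minimal sketch, using variable names from the defaults added below with illustrative values only:

    # group_vars/OSEv3.yml -- hypothetical override of the new flat defaults
    # (names come from roles/openshift_facts/defaults/main.yml; values are examples)
    openshift_hosted_registry_storage_kind: nfs
    openshift_hosted_registry_storage_volume_name: 'registry'
    openshift_hosted_registry_storage_volume_size: '5Gi'
    openshift_hosted_registry_storage_create_pv: True
    openshift_hosted_registry_storage_create_pvc: True

Under the old scheme the same settings had to travel through `oo_openshift_env` into `openshift.hosted.registry.storage.*` before any role could read them.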
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index f9564499d..45795d097 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -710,229 +710,6 @@ def oo_openshift_env(hostvars): return facts -# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements, too-many-locals -def oo_component_persistent_volumes(hostvars, groups, component, subcomponent=None): - """ Generate list of persistent volumes based on oo_openshift_env - storage options set in host variables for a specific component. - """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - if not issubclass(type(groups), dict): - raise errors.AnsibleFilterError("|failed expects groups is a dict") - - persistent_volume = None - - if component in hostvars['openshift']: - if subcomponent is not None: - storage_component = hostvars['openshift'][component][subcomponent] - else: - storage_component = hostvars['openshift'][component] - - if 'storage' in storage_component: - params = storage_component['storage'] - kind = params['kind'] - if 'create_pv' in params: - create_pv = params['create_pv'] - if kind is not None and create_pv: - if kind == 'nfs': - host = params['host'] - if host is None: - if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: - host = groups['oo_nfs_to_config'][0] - else: - raise errors.AnsibleFilterError("|failed no storage host detected") - directory = params['nfs']['directory'] - volume = params['volume']['name'] - path = directory + '/' + volume - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - nfs=dict( - server=host, - path=path))) - - elif kind == 'openstack': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - filesystem = params['openstack']['filesystem'] - volume_id = params['openstack']['volumeID'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - cinder=dict( - fsType=filesystem, - volumeID=volume_id))) - - elif kind == 'glusterfs': - volume = params['volume']['name'] - size = params['volume']['size'] - if 'labels' in params: - labels = params['labels'] - else: - labels = dict() - access_modes = params['access']['modes'] - endpoints = params['glusterfs']['endpoints'] - path = params['glusterfs']['path'] - read_only = params['glusterfs']['readOnly'] - persistent_volume = dict( - name="{0}-volume".format(volume), - capacity=size, - labels=labels, - access_modes=access_modes, - storage=dict( - glusterfs=dict( - endpoints=endpoints, - path=path, - readOnly=read_only))) - - elif not (kind == 'object' or kind == 'dynamic'): - msg = "|failed invalid storage kind '{0}' for component '{1}'".format( - kind, - component) - raise errors.AnsibleFilterError(msg) - return persistent_volume - - -# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements -def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): - """ Generate list of persistent volumes based on oo_openshift_env - storage options set in host variables. 
- """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - if not issubclass(type(groups), dict): - raise errors.AnsibleFilterError("|failed expects groups is a dict") - if persistent_volumes is not None and not issubclass(type(persistent_volumes), list): - raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list") - - if persistent_volumes is None: - persistent_volumes = [] - if 'hosted' in hostvars['openshift']: - for component in hostvars['openshift']['hosted']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'hosted', component) - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - - if 'logging' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'loggingops' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'metrics' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'prometheus' in hostvars['openshift']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'alertmanager' in hostvars['openshift']['prometheus']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertmanager') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - if 'alertbuffer' in hostvars['openshift']['prometheus']: - persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertbuffer') - if persistent_volume is not None: - persistent_volumes.append(persistent_volume) - return persistent_volumes - - -def oo_component_pv_claims(hostvars, component, subcomponent=None): - """ Generate list of persistent volume claims based on oo_openshift_env - storage options set in host variables for a speicific component. - """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - - if component in hostvars['openshift']: - if subcomponent is not None: - storage_component = hostvars['openshift'][component][subcomponent] - else: - storage_component = hostvars['openshift'][component] - - if 'storage' in storage_component: - params = storage_component['storage'] - kind = params['kind'] - if 'create_pv' in params: - if 'create_pvc' in params: - create_pv = params['create_pv'] - create_pvc = params['create_pvc'] - if kind not in [None, 'object'] and create_pv and create_pvc: - volume = params['volume']['name'] - size = params['volume']['size'] - access_modes = params['access']['modes'] - persistent_volume_claim = dict( - name="{0}-claim".format(volume), - capacity=size, - access_modes=access_modes) - return persistent_volume_claim - return None - - -def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): - """ Generate list of persistent volume claims based on oo_openshift_env - storage options set in host variables. 
- """ - if not issubclass(type(hostvars), dict): - raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list): - raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list") - - if persistent_volume_claims is None: - persistent_volume_claims = [] - if 'hosted' in hostvars['openshift']: - for component in hostvars['openshift']['hosted']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'hosted', component) - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - - if 'logging' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'loggingops' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'metrics' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'prometheus' in hostvars['openshift']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'alertmanager' in hostvars['openshift']['prometheus']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertmanager') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - if 'alertbuffer' in hostvars['openshift']['prometheus']: - persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertbuffer') - if persistent_volume_claim is not None: - persistent_volume_claims.append(persistent_volume_claim) - return persistent_volume_claims - - def oo_31_rpm_rename_conversion(rpms, openshift_version=None): """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms names with proper version (if provided) @@ -1220,8 +997,6 @@ class FilterModule(object): "oo_generate_secret": oo_generate_secret, "oo_nodes_with_label": oo_nodes_with_label, "oo_openshift_env": oo_openshift_env, - "oo_persistent_volumes": oo_persistent_volumes, - "oo_persistent_volume_claims": oo_persistent_volume_claims, "oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion, "oo_pods_match_component": oo_pods_match_component, "oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars, diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 3a9944ba4..e3b56d7a1 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -298,24 +298,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Set cockpit plugins #osm_cockpit_plugins=['cockpit-kubernetes'] -# Native high availability cluster method with optional load balancer. +# Native high availability (default cluster method) # If no lb group is defined, the installer assumes that a load balancer has # been preconfigured. For installation the value of # openshift_master_cluster_hostname must resolve to the load balancer # or to one or all of the masters defined in the inventory if no load # balancer is present. 
-#openshift_master_cluster_method=native -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com - -# Pacemaker high availability cluster method. -# Pacemaker HA environment must be able to self provision the -# configured VIP. For installation openshift_master_cluster_hostname -# must resolve to the configured VIP. -#openshift_master_cluster_method=pacemaker -#openshift_master_cluster_password=openshift_cluster -#openshift_master_cluster_vip=192.168.133.25 -#openshift_master_cluster_public_vip=192.168.133.25 #openshift_master_cluster_hostname=openshift-ansible.test.example.com #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml index 44a2ef534..69b2541bb 100644 --- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml +++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml @@ -8,7 +8,7 @@ hosts: masters:!masters[0] pre_tasks: - set_fact: - openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" + openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}" tasks: - include_role: name: openshift_logging diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 0c2a2c7e8..ed7a7bd1a 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -62,7 +62,6 @@ - origin-master - origin-master-api - origin-master-controllers - - pcsd failed_when: false - hosts: etcd @@ -384,8 +383,6 @@ - origin-excluder - origin-docker-excluder - origin-master - - pacemaker - - pcs register: result until: result | success @@ -456,8 +453,6 @@ - /etc/sysconfig/origin-master-api - /etc/sysconfig/origin-master-controllers - /usr/share/openshift/examples - - /var/lib/pacemaker - - /var/lib/pcsd - /usr/lib/systemd/system/atomic-openshift-master-api.service - /usr/lib/systemd/system/atomic-openshift-master-controllers.service - /usr/lib/systemd/system/origin-master-api.service diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml index da7ec9d21..1dabae357 100644 --- a/playbooks/aws/openshift-cluster/hosted.yml +++ b/playbooks/aws/openshift-cluster/hosted.yml @@ -4,7 +4,7 @@ - include: ../../openshift-metrics/private/config.yml when: openshift_metrics_install_metrics | default(false) | bool -- include: ../../common/openshift-cluster/openshift_logging.yml +- include: ../../openshift-logging/private/config.yml when: openshift_logging_install_logging | default(false) | bool - include: ../../openshift-prometheus/private/config.yml diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml deleted file mode 100644 index ca09fb65c..000000000 --- a/playbooks/byo/openshift-node/network_manager.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated.
-- include: ../../openshift-node/network_manager.yml diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 2eeb81b86..a8ca5e686 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -23,7 +23,7 @@ - include: ../../openshift-metrics/private/config.yml when: openshift_metrics_install_metrics | default(false) | bool -- include: openshift_logging.yml +- include: ../../openshift-logging/private/config.yml when: openshift_logging_install_logging | default(false) | bool - include: ../../openshift-prometheus/private/config.yml diff --git a/playbooks/openshift-glusterfs/private/registry.yml b/playbooks/openshift-glusterfs/private/registry.yml index 75c1f0300..917b729f9 100644 --- a/playbooks/openshift-glusterfs/private/registry.yml +++ b/playbooks/openshift-glusterfs/private/registry.yml @@ -1,40 +1,11 @@ --- - import_playbook: config.yml -- name: Initialize GlusterFS registry PV and PVC vars - hosts: oo_first_master - tags: hosted - tasks: - - set_fact: - glusterfs_pv: [] - glusterfs_pvc: [] - - - set_fact: - glusterfs_pv: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - storage: - glusterfs: - endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" - path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" - readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" - glusterfs_pvc: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - when: openshift.hosted.registry.storage.glusterfs.swap - - name: Create persistent volumes hosts: oo_first_master - tags: - - hosted - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + when: openshift_hosted_registry_storage_glusterfs_swap | default(False) - name: Create Hosted Resources hosts: oo_first_master diff --git a/playbooks/openshift-hosted/private/create_persistent_volumes.yml b/playbooks/openshift-hosted/private/create_persistent_volumes.yml index 8a60a30b8..41ae2eb69 100644 --- a/playbooks/openshift-hosted/private/create_persistent_volumes.yml +++ b/playbooks/openshift-hosted/private/create_persistent_volumes.yml @@ -1,9 +1,5 @@ --- - name: Create Hosted Resources - persistent volumes hosts: oo_first_master - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" roles: - role: openshift_persistent_volumes - when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/openshift-logging/config.yml index 74e186f33..8837a2d32 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/openshift-logging/config.yml @@ -4,6 +4,6 @@ # Hosted logging on. 
See inventory/byo/hosts.*.example for the # currently supported method. # -- include: ../../init/main.yml +- include: ../init/main.yml -- include: ../../common/openshift-cluster/openshift_logging.yml +- include: private/config.yml diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/openshift-logging/private/config.yml index bc59bd95a..bc59bd95a 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/openshift-logging/private/config.yml diff --git a/playbooks/openshift-logging/private/filter_plugins b/playbooks/openshift-logging/private/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/openshift-logging/private/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/openshift-logging/private/library b/playbooks/openshift-logging/private/library new file mode 120000 index 000000000..ba40d2f56 --- /dev/null +++ b/playbooks/openshift-logging/private/library @@ -0,0 +1 @@ +../../../library
\ No newline at end of file diff --git a/playbooks/openshift-logging/private/lookup_plugins b/playbooks/openshift-logging/private/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/openshift-logging/private/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/openshift-logging/private/roles b/playbooks/openshift-logging/private/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/openshift-logging/private/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml index b7cfbe4e4..a90cd6b22 100644 --- a/playbooks/openshift-master/private/additional_config.yml +++ b/playbooks/openshift-master/private/additional_config.yml @@ -19,8 +19,6 @@ openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" roles: - - role: openshift_master_cluster - when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - role: openshift_project_request_template when: openshift_project_request_template_manage - role: openshift_examples diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml index 97acc5d5d..ecf8f15d9 100644 --- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml +++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml @@ -183,7 +183,6 @@ systemd: name={{ openshift.common.service_type }}-master-api state=restarted when: - yedit_output.changed - - openshift.master.cluster_method == 'native' # We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers @@ -194,7 +193,6 @@ until: result.rc == 0 when: - yedit_output.changed - - openshift.master.cluster_method == 'native' - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml index 5dbb21502..1077d0b9c 100644 --- a/playbooks/openshift-master/private/validate_restart.yml +++ b/playbooks/openshift-master/private/validate_restart.yml @@ -14,9 +14,6 @@ - role: common local_facts: rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" - - role: master - local_facts: - cluster_method: "{{ openshift_master_cluster_method | default(None) }}" # Creating a temp file on localhost, we then check each system that will # be rebooted to see if that file exists, if so we know we're running diff --git a/playbooks/openshift-node/private/network_manager.yml b/playbooks/openshift-node/private/network_manager.yml index 7211787be..39640345f 100644 --- a/playbooks/openshift-node/private/network_manager.yml +++ b/playbooks/openshift-node/private/network_manager.yml @@ -1,6 +1,4 @@ --- -- import_playbook: ../../init/evaluate_groups.yml - - name: Install and configure NetworkManager hosts: oo_all_hosts become: yes diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md index cdb1229a4..db2a13d38 100644 --- a/playbooks/openstack/advanced-configuration.md +++ b/playbooks/openstack/advanced-configuration.md @@ -294,7 +294,6 @@ variables](https://docs.openshift.com/container-platform/3.6/install_config/inst in `inventory/group_vars/OSEv3.yml`. 
For example, given a load balancer node under the ansible group named `ext_lb`: - openshift_master_cluster_method: native openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}" openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}" diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml index 68d898d9a..933117127 100644 --- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -6,7 +6,6 @@ openshift_deployment_type: origin #openshift_release: v3.5 openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" -openshift_master_cluster_method: native openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}" osm_default_node_selector: 'region=primary' diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index 3cb1fa8d0..83ca83350 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -86,7 +86,7 @@ class CallbackModule(CallbackBase): }, 'installer_phase_logging': { 'title': 'Logging Install', - 'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml' + 'playbook': 'playbooks/openshift-logging/config.yml' }, 'installer_phase_prometheus': { 'title': 'Prometheus Install', diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index 410b739e9..cb83c8ead 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -3,8 +3,7 @@ systemd: name={{ openshift.common.service_type }}-master-api state=restarted when: > (openshift_master_ha | bool) and - (not master_api_service_status_changed | default(false)) and - openshift.master.cluster_method == 'native' + (not master_api_service_status_changed | default(false)) # TODO: need to fix up ignore_errors here # We retry the controllers because the API may not be 100% initialized yet. @@ -16,6 +15,5 @@ until: result.rc == 0 when: > (openshift_master_ha | bool) and - (not master_controllers_service_status_changed | default(false)) and - openshift.master.cluster_method == 'native' + (not master_controllers_service_status_changed | default(false)) ignore_errors: yes diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index 7064d727a..a182d23c5 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -3,4 +3,98 @@ openshift_cli_image_dict: origin: 'openshift/origin' openshift-enterprise: 'openshift3/ose' +openshift_hosted_images_dict: + origin: 'openshift/origin-${component}:${version}' + openshift-enterprise: 'openshift3/ose-${component}:${version}' + openshift_cli_image: "{{ osm_image | default(openshift_cli_image_dict[openshift_deployment_type]) }}" + +# osm_default_subdomain is an old migrated fact, can probably be removed. 
+osm_default_subdomain: "router.default.svc.cluster.local" +openshift_master_default_subdomain: "{{ osm_default_subdomain }}" + +openshift_hosted_etcd_storage_nfs_directory: '/exports' +openshift_hosted_etcd_storage_nfs_options: '*(rw,root_squash)' +openshift_hosted_etcd_storage_volume_name: 'etcd' +openshift_hosted_etcd_storage_volume_size: '1Gi' +openshift_hosted_etcd_storage_create_pv: True +openshift_hosted_etcd_storage_create_pvc: False +openshift_hosted_etcd_storage_access_modes: + - 'ReadWriteOnce' + +openshift_hosted_registry_namespace: 'default' +openshift_hosted_registry_storage_volume_name: 'registry' +openshift_hosted_registry_storage_volume_size: '5Gi' +openshift_hosted_registry_storage_create_pv: True +openshift_hosted_registry_storage_create_pvc: True +openshift_hosted_registry_storage_nfs_directory: '/exports' +openshift_hosted_registry_storage_nfs_options: '*(rw,root_squash)' +openshift_hosted_registry_storage_glusterfs_endpoints: 'glusterfs-registry-endpoints' +openshift_hosted_registry_storage_glusterfs_path: glusterfs-registry-volume +openshift_hosted_registry_storage_glusterfs_readOnly: False +openshift_hosted_registry_storage_glusterfs_swap: False +openshift_hosted_registry_storage_glusterfs_swapcopy: True +openshift_hosted_registry_storage_glusterfs_ips: [] +openshift_hosted_registry_storage_access_modes: + - 'ReadWriteMany' + +openshift_logging_storage_nfs_directory: '/exports' +openshift_logging_storage_nfs_options: '*(rw,root_squash)' +openshift_logging_storage_volume_name: 'logging-es' +openshift_logging_storage_volume_size: '10Gi' +openshift_logging_storage_create_pv: True +openshift_logging_storage_create_pvc: False +openshift_logging_storage_access_modes: + - 'ReadWriteOnce' + +openshift_loggingops_storage_volume_name: 'logging-es-ops' +openshift_loggingops_storage_volume_size: '10Gi' +openshift_loggingops_storage_create_pv: True +openshift_loggingops_storage_create_pvc: False +openshift_loggingops_storage_nfs_directory: '/exports' +openshift_loggingops_storage_nfs_options: '*(rw,root_squash)' +openshift_loggingops_storage_access_modes: + - 'ReadWriteOnce' + +openshift_metrics_deploy: False +openshift_metrics_duration: 7 +openshift_metrics_resolution: '10s' +openshift_metrics_storage_volume_name: 'metrics' +openshift_metrics_storage_volume_size: '10Gi' +openshift_metrics_storage_create_pv: True +openshift_metrics_storage_create_pvc: False +openshift_metrics_storage_nfs_directory: '/exports' +openshift_metrics_storage_nfs_options: '*(rw,root_squash)' +openshift_metrics_storage_access_modes: + - 'ReadWriteOnce' + +openshift_prometheus_storage_volume_name: 'prometheus' +openshift_prometheus_storage_volume_size: '10Gi' +openshift_prometheus_storage_nfs_directory: '/exports' +openshift_prometheus_storage_nfs_options: '*(rw,root_squash)' +openshift_prometheus_storage_access_modes: + - 'ReadWriteOnce' +openshift_prometheus_storage_create_pv: True +openshift_prometheus_storage_create_pvc: False + +openshift_prometheus_alertmanager_storage_volume_name: 'prometheus-alertmanager' +openshift_prometheus_alertmanager_storage_volume_size: '10Gi' +openshift_prometheus_alertmanager_storage_nfs_directory: '/exports' +openshift_prometheus_alertmanager_storage_nfs_options: '*(rw,root_squash)' +openshift_prometheus_alertmanager_storage_access_modes: + - 'ReadWriteOnce' +openshift_prometheus_alertmanager_storage_create_pv: True +openshift_prometheus_alertmanager_storage_create_pvc: False + +openshift_prometheus_alertbuffer_storage_volume_name: 'prometheus-alertbuffer' +openshift_prometheus_alertbuffer_storage_volume_size:
'10Gi' +openshift_prometheus_alertbuffer_storage_nfs_directory: '/exports' +openshift_prometheus_alertbuffer_storage_nfs_options: '*(rw,root_squash)' +openshift_prometheus_alertbuffer_storage_access_modes: + - 'ReadWriteOnce' +openshift_prometheus_alertbuffer_storage_create_pv: True +openshift_prometheus_alertbuffer_storage_create_pvc: False + + +openshift_router_selector: "region=infra" +openshift_hosted_router_selector: "{{ openshift_router_selector }}" +openshift_hosted_registry_selector: "{{ openshift_router_selector }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 508228b2e..b371d347c 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -11,14 +11,13 @@ import copy import errno import json import re -import io import os import yaml import struct import socket from distutils.util import strtobool from distutils.version import LooseVersion -from ansible.module_utils.six import string_types, text_type +from ansible.module_utils.six import string_types from ansible.module_utils.six.moves import configparser # ignore pylint errors related to the module_utils import @@ -86,24 +85,6 @@ def migrate_node_facts(facts): return facts -def migrate_hosted_facts(facts): - """ Apply migrations for master facts """ - if 'master' in facts: - if 'router_selector' in facts['master']: - if 'hosted' not in facts: - facts['hosted'] = {} - if 'router' not in facts['hosted']: - facts['hosted']['router'] = {} - facts['hosted']['router']['selector'] = facts['master'].pop('router_selector') - if 'registry_selector' in facts['master']: - if 'hosted' not in facts: - facts['hosted'] = {} - if 'registry' not in facts['hosted']: - facts['hosted']['registry'] = {} - facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector') - return facts - - def migrate_admission_plugin_facts(facts): """ Apply migrations for admission plugin facts """ if 'master' in facts: @@ -125,7 +106,6 @@ def migrate_local_facts(facts): migrated_facts = copy.deepcopy(facts) migrated_facts = migrate_common_facts(migrated_facts) migrated_facts = migrate_node_facts(migrated_facts) - migrated_facts = migrate_hosted_facts(migrated_facts) migrated_facts = migrate_admission_plugin_facts(migrated_facts) return migrated_facts @@ -412,58 +392,6 @@ def normalize_provider_facts(provider, metadata): return facts -# pylint: disable=too-many-branches -def set_selectors(facts): - """ Set selectors facts if not already present in facts dict - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the generated selectors - facts if they were not already present - - """ - selector = "region=infra" - - if 'hosted' not in facts: - facts['hosted'] = {} - if 'router' not in facts['hosted']: - facts['hosted']['router'] = {} - if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']: - facts['hosted']['router']['selector'] = selector - if 'registry' not in facts['hosted']: - facts['hosted']['registry'] = {} - if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']: - facts['hosted']['registry']['selector'] = selector - if 'metrics' not in facts['hosted']: - facts['hosted']['metrics'] = {} - if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']: - facts['hosted']['metrics']['selector'] = None - if 'logging' not in facts or not 
isinstance(facts['logging'], dict): - facts['logging'] = {} - if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']: - facts['logging']['selector'] = None - if 'etcd' not in facts['hosted']: - facts['hosted']['etcd'] = {} - if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']: - facts['hosted']['etcd']['selector'] = None - if 'prometheus' not in facts: - facts['prometheus'] = {} - if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']: - facts['prometheus']['selector'] = None - if 'alertmanager' not in facts['prometheus']: - facts['prometheus']['alertmanager'] = {} - # pylint: disable=line-too-long - if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']: - facts['prometheus']['alertmanager']['selector'] = None - if 'alertbuffer' not in facts['prometheus']: - facts['prometheus']['alertbuffer'] = {} - # pylint: disable=line-too-long - if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']: - facts['prometheus']['alertbuffer']['selector'] = None - - return facts - - def set_identity_providers_if_unset(facts): """ Set identity_providers fact if not already present in facts dict @@ -608,60 +536,6 @@ def set_aggregate_facts(facts): return facts -def set_etcd_facts_if_unset(facts): - """ - If using embedded etcd, loads the data directory from master-config.yaml. - - If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf. - - If anything goes wrong parsing these, the fact will not be set. - """ - if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']): - etcd_facts = facts['etcd'] if 'etcd' in facts else dict() - - if 'etcd_data_dir' not in etcd_facts: - try: - # Parse master config to find actual etcd data dir: - master_cfg_path = os.path.join(facts['common']['config_base'], - 'master/master-config.yaml') - master_cfg_f = open(master_cfg_path, 'r') - config = yaml.safe_load(master_cfg_f.read()) - master_cfg_f.close() - - etcd_facts['etcd_data_dir'] = \ - config['etcdConfig']['storageDirectory'] - - facts['etcd'] = etcd_facts - - # We don't want exceptions bubbling up here: - # pylint: disable=broad-except - except Exception: - pass - else: - etcd_facts = facts['etcd'] if 'etcd' in facts else dict() - - # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf: - try: - # Add a fake section for parsing: - ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8') - ini_fp = io.StringIO(ini_str) - config = configparser.RawConfigParser() - config.readfp(ini_fp) - etcd_data_dir = config.get('root', 'ETCD_DATA_DIR') - if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'): - etcd_data_dir = etcd_data_dir[1:-1] - - etcd_facts['etcd_data_dir'] = etcd_data_dir - facts['etcd'] = etcd_facts - - # We don't want exceptions bubbling up here: - # pylint: disable=broad-except - except Exception: - pass - - return facts - - def set_deployment_facts_if_unset(facts): """ Set Facts that vary based on deployment_type. This currently includes common.service_type, master.registry_url, node.registry_url, @@ -1631,13 +1505,8 @@ class OpenShiftFacts(object): 'cloudprovider', 'common', 'etcd', - 'hosted', 'master', - 'node', - 'logging', - 'loggingops', - 'metrics', - 'prometheus'] + 'node'] # Disabling too-many-arguments, this should be cleaned up as a TODO item. 
# pylint: disable=too-many-arguments,no-value-for-parameter @@ -1717,7 +1586,6 @@ class OpenShiftFacts(object): facts = migrate_oauth_template_facts(facts) facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) - facts = set_selectors(facts) facts = set_identity_providers_if_unset(facts) facts = set_deployment_facts_if_unset(facts) facts = set_sdn_facts_if_unset(facts, self.system_facts) @@ -1727,7 +1595,6 @@ class OpenShiftFacts(object): facts = build_api_server_args(facts) facts = set_version_facts_if_unset(facts) facts = set_aggregate_facts(facts) - facts = set_etcd_facts_if_unset(facts) facts = set_proxy_facts(facts) facts = set_builddefaults_facts(facts) facts = set_buildoverrides_facts(facts) @@ -1793,178 +1660,6 @@ class OpenShiftFacts(object): if 'cloudprovider' in roles: defaults['cloudprovider'] = dict(kind=None) - if 'hosted' in roles or self.role == 'hosted': - defaults['hosted'] = dict( - etcd=dict( - storage=dict( - kind=None, - volume=dict( - name='etcd', - size='1Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ), - registry=dict( - storage=dict( - kind=None, - volume=dict( - name='registry', - size='5Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)'), - glusterfs=dict( - endpoints='glusterfs-registry-endpoints', - path='glusterfs-registry-volume', - ips=[], - readOnly=False, - swap=False, - swapcopy=True), - host=None, - access=dict( - modes=['ReadWriteMany'] - ), - create_pv=True, - create_pvc=True - ) - ), - router=dict() - ) - - defaults['logging'] = dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['loggingops'] = dict( - storage=dict( - kind=None, - volume=dict( - name='logging-es-ops', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['metrics'] = dict( - deploy=False, - duration=7, - resolution='10s', - storage=dict( - kind=None, - volume=dict( - name='metrics', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['prometheus'] = dict( - storage=dict( - kind=None, - volume=dict( - name='prometheus', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['prometheus']['alertmanager'] = dict( - storage=dict( - kind=None, - volume=dict( - name='prometheus-alertmanager', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - - defaults['prometheus']['alertbuffer'] = dict( - storage=dict( - kind=None, - volume=dict( - name='prometheus-alertbuffer', - size='10Gi' - ), - nfs=dict( - directory='/exports', - options='*(rw,root_squash)' - ), - host=None, - access=dict( - modes=['ReadWriteOnce'] - ), - create_pv=True, - create_pvc=False - ) - ) - return defaults def 
guess_host_provider(self): diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index e70c0c420..b6501d288 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -27,6 +27,9 @@ openshift_cluster_domain: 'cluster.local' r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" +openshift_hosted_router_namespace: 'default' + openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}" openshift_hosted_router_edits: @@ -40,13 +43,14 @@ openshift_hosted_router_edits: value: 21600 action: put +openshift_hosted_router_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" openshift_hosted_routers: - name: router replicas: "{{ replicas | default(1) }}" namespace: default serviceaccount: router selector: "{{ openshift_hosted_router_selector | default(None) }}" - images: "{{ openshift_hosted_router_image | default(None) }}" + images: "{{ openshift_hosted_router_registryurl }}" edits: "{{ openshift_hosted_router_edits }}" stats_port: 1936 ports: @@ -64,6 +68,11 @@ r_openshift_hosted_router_os_firewall_allow: [] # Registry # ############ +openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" +openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}" +openshift_hosted_registry_routecertificates: {} +openshift_hosted_registry_routetermination: "passthrough" + r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py index 7f41529ac..003ce5f9e 100644 --- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py +++ b/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py @@ -12,7 +12,7 @@ class FilterModule(object): def get_router_replicas(replicas=None, router_nodes=None): ''' This function will return the number of replicas based on the results from the defined - openshift.hosted.router.replicas OR + openshift_hosted_router_replicas OR the query from oc_obj on openshift nodes with a selector OR default to 1 diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml index e2e06594b..4797fb788 100644 --- a/roles/openshift_hosted/tasks/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -13,13 +13,13 @@ l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}" l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}" -- when: openshift.hosted.registry.replicas | default(none) is none +- when: openshift_hosted_registry_replicas | default(none) is none block: - name: Retrieve list of openshift nodes matching registry selector oc_obj: state: list kind: node - selector: "{{ openshift.hosted.registry.selector | default(omit) }}" + selector: "{{ openshift_hosted_registry_selector }}" register: registry_nodes - name: set_fact l_node_count to number of nodes matching registry selector @@ -39,16 +39,13 @@ # just 1: - name: set_fact
l_default_replicas when l_node_count > 0 set_fact: - l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}" + l_default_replicas: "{{ l_node_count if openshift_hosted_registry_storage_kind | default(none) is not none else 1 }}" when: l_node_count | int > 0 - name: set openshift_hosted facts set_fact: - openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}" - openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" - openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}" - openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}" - openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift.hosted.registry.storage.glusterfs.ips }}{% endif %}" + # This determines the gluster_ips to use for the registry by looping over the glusterfs_registry group + openshift_hosted_registry_storage_glusterfs_ips: "{%- set gluster_ips = [] %}{% if groups.glusterfs_registry is defined %}{% for node in groups.glusterfs_registry %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% elif groups.glusterfs is defined %}{% for node in groups.glusterfs %}{%- set _ = gluster_ips.append(hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip)) %}{% endfor %}{{ gluster_ips }}{% else %}{{ openshift_hosted_registry_storage_glusterfs_ips }}{% endif %}" - name: Update registry environment variables when pushing via dns set_fact: @@ -106,7 +103,7 @@ - include: storage/object_storage.yml static: no when: - - openshift.hosted.registry.storage.kind | default(none) == 'object' + - openshift_hosted_registry_storage_kind | default(none) == 'object' - name: Update openshift_hosted facts for persistent volumes set_fact: @@ -115,23 +112,23 @@ pvc_volume_mounts: - name: registry-storage type: persistentVolumeClaim - claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim" + claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-claim" when: - - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs'] + - openshift_hosted_registry_storage_kind | default(none) in ['nfs', 'openstack', 'glusterfs'] - include: storage/glusterfs_endpoints.yml when: - openshift_hosted_registry_storage_glusterfs_ips|length > 0 - - openshift.hosted.registry.storage.kind | default(none) in ['glusterfs'] + - openshift_hosted_registry_storage_kind | default(none) in ['glusterfs'] - name: Create OpenShift registry oc_adm_registry: name: "{{ openshift_hosted_registry_name }}" namespace: "{{ openshift_hosted_registry_namespace }}" selector: "{{ openshift_hosted_registry_selector }}" - replicas: "{{ openshift_hosted_registry_replicas }}" + replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}" service_account: "{{ openshift_hosted_registry_serviceaccount }}" 
- images: "{{ openshift_hosted_registry_images }}" + images: "{{ penshift_hosted_registry_registryurl }}" env_vars: "{{ openshift_hosted_registry_env_vars }}" volume_mounts: "{{ openshift_hosted_registry_volumes }}" edits: "{{ openshift_hosted_registry_edits }}" @@ -151,7 +148,7 @@ - include: storage/glusterfs.yml when: - - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap + - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap - name: Delete temp directory file: diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index dd7053656..57c10b637 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -11,16 +11,14 @@ oc_obj: state: list kind: node - namespace: "{{ openshift.hosted.router.namespace | default('default') }}" - selector: "{{ openshift.hosted.router.selector | default(omit) }}" + namespace: "{{ openshift_hosted_router_namespace }}" + selector: "{{ openshift_hosted_router_selector }}" register: router_nodes - when: openshift.hosted.router.replicas | default(none) is none + when: openshift_hosted_router_replicas | default(none) is none - name: set_fact replicas set_fact: - replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}" - openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}" - openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}" + replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}" - name: Get the certificate contents for router copy: @@ -42,8 +40,8 @@ signer_key: "{{ openshift_master_config_dir }}/ca.key" signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt" hostnames: - - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}" - - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}" + - "{{ openshift_master_default_subdomain }}" + - "*.{{ openshift_master_default_subdomain }}" cert: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" key: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" with_items: "{{ openshift_hosted_routers }}" diff --git a/roles/openshift_hosted/tasks/secure.yml b/roles/openshift_hosted/tasks/secure.yml index 174bc39a4..ecbf5b141 100644 --- a/roles/openshift_hosted/tasks/secure.yml +++ b/roles/openshift_hosted/tasks/secure.yml @@ -1,10 +1,4 @@ --- -- name: Configure facts for docker-registry - set_fact: - openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}" - openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}" - openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}" - - name: Include reencrypt route configuration include: secure/reencrypt.yml static: no @@ -39,7 +33,7 @@ - "{{ docker_registry_route.results[0].spec.host }}" - "{{ openshift_hosted_registry_name }}.default.svc" - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}" - - "{{ openshift_hosted_registry_routehost }}" + - "{{ 
openshift_hosted_registry_routehost | default(omit) }}" cert: "{{ docker_registry_cert_path }}" key: "{{ docker_registry_key_path }}" expire_days: "{{ openshift_hosted_registry_cert_expire_days }}" diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index 7cae67baa..18b2edcc6 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml @@ -17,7 +17,7 @@ until: - "registry_pods.results.results[0]['items'] | count > 0" # There must be as many matching pods with 'Ready' status True as there are expected replicas - - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int" + - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int" delay: 10 retries: "{{ (600 / 10) | int }}" @@ -35,7 +35,7 @@ mount: state: mounted fstype: glusterfs - src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}" + src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}" name: "{{ mktemp.stdout }}" - name: Set registry volume permissions @@ -60,7 +60,7 @@ - name: Copy current registry contents to new GlusterFS volume command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/" - when: openshift.hosted.registry.storage.glusterfs.swapcopy + when: openshift_hosted_registry_storage_glusterfs_swapcopy - name: Swap new GlusterFS registry volume oc_volume: @@ -68,7 +68,7 @@ name: "{{ openshift_hosted_registry_name }}" vol_name: registry-storage mount_type: pvc - claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + claim_name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim" - name: Deactivate registry maintenance mode oc_env: @@ -77,7 +77,7 @@ state: absent env_vars: - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true' - when: openshift.hosted.registry.storage.glusterfs.swap + when: openshift_hosted_registry_storage_glusterfs_swap - name: Unmount registry volume and clean up mount point/fstab mount: diff --git 
a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml index 0f4381748..bd7181c17 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml @@ -10,7 +10,7 @@ dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - name: Create GlusterFS registry service and endpoint - command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift.hosted.registry.namespace | default('default') }}" + command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}" with_items: - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml" - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml" diff --git a/roles/openshift_hosted/tasks/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml index 8553a8098..3d1b2c68e 100644 --- a/roles/openshift_hosted/tasks/storage/object_storage.yml +++ b/roles/openshift_hosted/tasks/storage/object_storage.yml @@ -1,6 +1,6 @@ --- - include: s3.yml - when: openshift.hosted.registry.storage.provider == 's3' + when: openshift_hosted_registry_storage_provider == 's3' - name: Ensure the registry secret exists oc_secret: diff --git a/roles/openshift_hosted/tasks/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml index 8e905d905..4c100ee4e 100644 --- a/roles/openshift_hosted/tasks/storage/s3.yml +++ b/roles/openshift_hosted/tasks/storage/s3.yml @@ -2,8 +2,8 @@ - name: Assert that S3 variables are provided for registry_config template assert: that: - - openshift.hosted.registry.storage.s3.bucket | default(none) is not none - - openshift.hosted.registry.storage.s3.bucket | default(none) is not none + - openshift_hosted_registry_storage_s3_bucket | default(none) is not none + - openshift_hosted_registry_storage_s3_region | default(none) is not none msg: | When using S3 storage, the following variables are required: openshift_hosted_registry_storage_s3_bucket diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 index 607d25533..3c874d910 100644 --- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Endpoints metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} subsets: - addresses: {% for ip in openshift_hosted_registry_storage_glusterfs_ips %} diff --git a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 index 452c7c3e1..f18c94a4f 100644 --- a/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 +++ b/roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} spec: ports: - port: 1 diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 index 607d25533..3c874d910 100644 --- 
a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Endpoints metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} subsets: - addresses: {% for ip in openshift_hosted_registry_storage_glusterfs_ips %} diff --git a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 index 452c7c3e1..f18c94a4f 100644 --- a/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 +++ b/roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }} + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} spec: ports: - port: 1 diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 8fc70cecb..ed97d539c 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -1,19 +1 @@ --- -# openshift_*_selector variables have been deprecated in favor of -# openshift_hosted_*_selector variables. -- set_fact: - openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}" - when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined -- set_fact: - openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}" - when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined - -- name: Set hosted facts - openshift_facts: - role: "{{ item }}" - openshift_env: "{{ hostvars - | oo_merge_hostvars(vars, inventory_hostname) - | oo_openshift_env }}" - openshift_env_structures: - - 'openshift.hosted.router.*' - with_items: [hosted, logging, loggingops, metrics, prometheus] diff --git a/roles/openshift_hosted_metrics/README.md b/roles/openshift_hosted_metrics/README.md deleted file mode 100644 index c2af3c494..000000000 --- a/roles/openshift_hosted_metrics/README.md +++ /dev/null @@ -1,54 +0,0 @@ -OpenShift Metrics with Hawkular -==================== - -OpenShift Metrics Installation - -Requirements ------------- - -* Ansible 2.2 -* It requires subdomain fqdn to be set. -* If persistence is enabled, then it also requires NFS. - -Role Variables --------------- - -From this role: - -| Name | Default value | | -|-------------------------------------------------|-----------------------|-------------------------------------------------------------| -| openshift_hosted_metrics_deploy | `False` | If metrics should be deployed | -| openshift_hosted_metrics_public_url | null | Hawkular metrics public url | -| openshift_hosted_metrics_storage_nfs_directory | `/exports` | Root export directory. | -| openshift_hosted_metrics_storage_volume_name | `metrics` | Metrics volume within openshift_hosted_metrics_volume_dir | -| openshift_hosted_metrics_storage_volume_size | `10Gi` | Metrics volume size | -| openshift_hosted_metrics_storage_nfs_options | `*(rw,root_squash)` | NFS options for configured exports. 
diff --git a/roles/openshift_hosted_metrics/README.md b/roles/openshift_hosted_metrics/README.md
deleted file mode 100644
index c2af3c494..000000000
--- a/roles/openshift_hosted_metrics/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-OpenShift Metrics with Hawkular
-====================
-
-OpenShift Metrics Installation
-
-Requirements
-------------
-
-* Ansible 2.2
-* It requires subdomain fqdn to be set.
-* If persistence is enabled, then it also requires NFS.
-
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value | |
-|-------------------------------------------------|-----------------------|-------------------------------------------------------------|
-| openshift_hosted_metrics_deploy | `False` | If metrics should be deployed |
-| openshift_hosted_metrics_public_url | null | Hawkular metrics public url |
-| openshift_hosted_metrics_storage_nfs_directory | `/exports` | Root export directory. |
-| openshift_hosted_metrics_storage_volume_name | `metrics` | Metrics volume within openshift_hosted_metrics_volume_dir |
-| openshift_hosted_metrics_storage_volume_size | `10Gi` | Metrics volume size |
-| openshift_hosted_metrics_storage_nfs_options | `*(rw,root_squash)` | NFS options for configured exports. |
-| openshift_hosted_metrics_duration | `7` | Metrics query duration |
-| openshift_hosted_metrics_resolution | `10s` | Metrics resolution |
-
-
-Dependencies
-------------
-openshift_facts
-openshift_examples
-openshift_master_facts
-
-Example Playbook
-----------------
-
-```
-- name: Configure openshift-metrics
-  hosts: oo_first_master
-  roles:
-  - role: openshift_hosted_metrics
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jose David Martín (j.david.nieto@gmail.com)
diff --git a/roles/openshift_hosted_metrics/defaults/main.yml b/roles/openshift_hosted_metrics/defaults/main.yml
deleted file mode 100644
index a01f24df8..000000000
--- a/roles/openshift_hosted_metrics/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
deleted file mode 100644
index 074b72942..000000000
--- a/roles/openshift_hosted_metrics/handlers/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: restart master api
-  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-  notify: Verify API Server
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
-  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
-  retries: 3
-  delay: 5
-  register: result
-  until: result.rc == 0
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
-  # Using curl here since the uri module requires python-httplib2 and
-  # wait_for port doesn't provide health information.
-  command: >
-    curl --silent --tlsv1.2
-    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-    {{ openshift.master.api_url }}/healthz/ready
-  args:
-    # Disables the following warning:
-    # Consider using get_url or uri module rather than running curl
-    warn: no
-  register: api_available_output
-  until: api_available_output.stdout == 'ok'
-  retries: 120
-  delay: 1
-  changed_when: false
diff --git a/roles/openshift_hosted_metrics/meta/main.yaml b/roles/openshift_hosted_metrics/meta/main.yaml
deleted file mode 100644
index debca3ca6..000000000
--- a/roles/openshift_hosted_metrics/meta/main.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-galaxy_info:
-  author: David Martín
-  description:
-  company:
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies:
-- { role: openshift_examples }
-- { role: openshift_facts }
-- { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
deleted file mode 100644
index 15dd1bd54..000000000
--- a/roles/openshift_hosted_metrics/tasks/install.yml
+++ /dev/null
@@ -1,132 +0,0 @@
----
-
-- name: Test if metrics-deployer service account exists
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace=openshift-infra
-    get serviceaccount metrics-deployer -o json
-  register: serviceaccount
-  changed_when: false
-  failed_when: false
-
-- name: Create metrics-deployer Service Account
-  shell: >
-    echo {{ metrics_deployer_sa | to_json | quote }} |
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    create -f -
-  when: serviceaccount.rc == 1
-
-- name: Test edit permissions
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
-  register: edit_rolebindings
-  changed_when: false
-
-- name: Add edit permission to the openshift-infra project to metrics-deployer SA
-  command: >
-    {{ openshift.common.client_binary }} adm
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    policy add-role-to-user edit
-    system:serviceaccount:openshift-infra:metrics-deployer
-  when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout"
-
-- name: Test hawkular view permissions
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
-  register: view_rolebindings
-  changed_when: false
-
-- name: Add view permissions to hawkular SA
-  command: >
-    {{ openshift.common.client_binary }} adm
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    policy add-role-to-user view
-    system:serviceaccount:openshift-infra:hawkular
-  when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings"
-
-- name: Test cluster-reader permissions
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
-  register: cluster_reader_clusterrolebindings
-  changed_when: false
-
-- name: Add cluster-reader permission to the openshift-infra project to heapster SA
-  command: >
-    {{ openshift.common.client_binary }} adm
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    policy add-cluster-role-to-user cluster-reader
-    system:serviceaccount:openshift-infra:heapster
-  when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout"
-
-- name: Create metrics-deployer secret
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    secrets new metrics-deployer nothing=/dev/null
-  register: metrics_deployer_secret
-  changed_when: metrics_deployer_secret.rc == 0
-  failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
-
-# TODO: extend this to allow user passed in certs or generating cert with
-# OpenShift CA
-- name: Build metrics deployer command
-  set_fact:
-    deployer_cmd: "{{ openshift.common.client_binary }} process -f \
-      {{ hosted_base }}/metrics-deployer.yaml -v \
-      HAWKULAR_METRICS_HOSTNAME={{ g_metrics_hostname }} \
-      -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \
-      -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \
-      -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \
-      -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}
-      {{ image_prefix }} \
-      {{ image_version }} \
-      -v MODE={{ deployment_mode }} \
-      | {{ openshift.common.client_binary }} --namespace openshift-infra \
-      --config={{ openshift_hosted_metrics_kubeconfig }} \
-      create -o name -f -"
-
-- name: Deploy Metrics
-  shell: "{{ deployer_cmd }}"
-  register: deploy_metrics
-  failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0"
-  changed_when: deploy_metrics.rc == 0
-
-- set_fact:
-    deployer_pod: "{{ deploy_metrics.stdout[1:2] }}"
-
-# TODO: re-enable this once the metrics deployer validation issue is fixed
-# when using dynamically provisioned volumes
-- name: "Wait for image pull and deployer pod"
-  shell: >
-    {{ openshift.common.client_binary }}
-    --namespace openshift-infra
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    get {{ deploy_metrics.stdout }}
-  register: deploy_result
-  until: "{{ 'Completed' in deploy_result.stdout }}"
-  failed_when: False
-  retries: 60
-  delay: 10
-
-- name: Configure master for metrics
-  modify_yaml:
-    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
-    yaml_key: assetConfig.metricsPublicURL
-    yaml_value: "{{ openshift_hosted_metrics_deploy_url }}"
-  notify: restart master
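The removed deployer command builds an `oc process ... | oc create -f -` pipeline. A standalone sketch of the same pattern, assuming the template path and parameter value shown (both illustrative):

```
- name: Process a template and create the resulting objects
  shell: >
    {{ openshift.common.client_binary }} process
    -f /usr/share/openshift/hosted/metrics-deployer.yaml
    -v HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.apps.example.com
    | {{ openshift.common.client_binary }} --namespace openshift-infra create -o name -f -
```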
diff --git a/roles/openshift_hosted_metrics/tasks/main.yaml b/roles/openshift_hosted_metrics/tasks/main.yaml
deleted file mode 100644
index 5ce8aa92b..000000000
--- a/roles/openshift_hosted_metrics/tasks/main.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-- name: Create temp directory for kubeconfig
-  command: mktemp -d /tmp/openshift-ansible-XXXXXX
-  register: mktemp
-  changed_when: False
-
-- name: Record kubeconfig tmp dir
-  set_fact:
-    openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-
-- name: Copy the admin client config(s)
-  command: >
-    cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }}
-  changed_when: False
-
-- name: Set hosted metrics facts
-  openshift_facts:
-    role: hosted
-    openshift_env: "{{ hostvars
-                       | oo_merge_hostvars(vars, inventory_hostname)
-                       | oo_openshift_env }}"
-    openshift_env_structures:
-    - 'openshift.hosted.metrics.*'
-
-- set_fact:
-    metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
-    metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
-    metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
-    image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}"
-    image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}"
-
-
-- name: Check for existing metrics pods
-  shell: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get pods -l {{ item }} | grep -q Running
-  register: metrics_pods_status
-  with_items:
-  - metrics-infra=hawkular-metrics
-  - metrics-infra=heapster
-  - metrics-infra=hawkular-cassandra
-  failed_when: false
-  changed_when: false
-
-- name: Check for previous deployer
-  shell: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
-  register: metrics_deployer_status
-  failed_when: false
-  changed_when: false
-
-- name: Record current deployment status
-  set_fact:
-    greenfield: "{{ not metrics_deployer_status.rc == 0 }}"
-    failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}"
-    metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}"
-
-- name: Set deployment mode
-  set_fact:
-    deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}"
-
-# TODO: handle non greenfield deployments in the future
-- include: install.yml
-  when: greenfield
-
-- name: Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False
diff --git a/roles/openshift_hosted_metrics/vars/main.yaml b/roles/openshift_hosted_metrics/vars/main.yaml
deleted file mode 100644
index 6c207d6ac..000000000
--- a/roles/openshift_hosted_metrics/vars/main.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-hawkular_permission_oc_commands:
-  - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
-  - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
-
-metrics_deployer_sa:
-  apiVersion: v1
-  kind: ServiceAccount
-  metadata:
-    name: metrics-deployer
-  secrets:
-  - name: metrics-deployer
-
-
-hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
-
-hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
-
-hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
-
-metrics_upgrade: openshift.hosted.metrics.upgrade | default(False)
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 497c6e0c5..2f1aa061f 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -28,7 +28,7 @@ openshift_logging_curator_ops_memory_limit: 256Mi
 openshift_logging_curator_ops_cpu_request: 100m
 openshift_logging_curator_ops_nodeselector: {}

-openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_hostname: "{{ 'kibana.' ~ openshift_master_default_subdomain }}"
 openshift_logging_kibana_cpu_limit: null
 openshift_logging_kibana_memory_limit: 736Mi
 openshift_logging_kibana_cpu_request: 100m
@@ -54,7 +54,7 @@ openshift_logging_kibana_key: ""
 #for the public facing kibana certs
 openshift_logging_kibana_ca: ""

-openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ openshift_master_default_subdomain }}"
 openshift_logging_kibana_ops_cpu_limit: null
 openshift_logging_kibana_ops_memory_limit: 736Mi
 openshift_logging_kibana_ops_cpu_request: 100m
@@ -109,7 +109,7 @@ openshift_logging_es_config: {}
 # for exposing es to external (outside of the cluster) clients
 openshift_logging_es_allow_external: False
-openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_hostname: "{{ 'es.' ~ openshift_master_default_subdomain }}"

 #The absolute path on the control node to the cert file to use
 #for the public facing es certs
@@ -145,7 +145,7 @@ openshift_logging_es_ops_nodeselector: {}
 # for exposing es-ops to external (outside of the cluster) clients
 openshift_logging_es_ops_allow_external: False
-openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ openshift_master_default_subdomain }}"

 #The absolute path on the control node to the cert file to use
 #for the public facing es-ops certs
@@ -165,7 +165,7 @@ openshift_logging_storage_access_modes: ['ReadWriteOnce']
 # mux - secure_forward listener service
 openshift_logging_mux_allow_external: False
 openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
 openshift_logging_mux_port: 24284
 openshift_logging_mux_cpu_limit: null
 openshift_logging_mux_memory_limit: 512Mi
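With the `router.default.svc.cluster.local` fallback removed, `openshift_master_default_subdomain` must be defined for these hostnames to render. A sketch, with an illustrative subdomain:

```
openshift_master_default_subdomain: apps.example.com
# via the defaults above, this yields hostnames such as:
#   kibana.apps.example.com, kibana-ops.apps.example.com,
#   es.apps.example.com, es-ops.apps.example.com, mux.apps.example.com
```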
@@ -11,7 +11,7 @@
   delay: 5
   register: result
   until: result.rc == 0
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_controllers_service_status_changed | default(false) | bool))

 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml
index 6752fb7f9..d4635aab0 100644
--- a/roles/openshift_logging_curator/meta/main.yaml
+++ b/roles/openshift_logging_curator/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
index 097270772..6a9a6539c 100644
--- a/roles/openshift_logging_elasticsearch/meta/main.yaml
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml
index 2003aacb2..89c98204f 100644
--- a/roles/openshift_logging_fluentd/meta/main.yaml
+++ b/roles/openshift_logging_fluentd/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index 6cdf7c8f3..007089e00 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -10,7 +10,7 @@ openshift_logging_kibana_cpu_limit: null
 openshift_logging_kibana_cpu_request: 100m
 openshift_logging_kibana_memory_limit: 736Mi

-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"

 openshift_logging_kibana_es_host: "logging-es"
 openshift_logging_kibana_es_port: 9200
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
index 89e08abc0..d97586a37 100644
--- a/roles/openshift_logging_kibana/meta/main.yaml
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index cd15da939..1e6c501bf 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -28,7 +28,7 @@ openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journ
 openshift_logging_mux_allow_external: False
 openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
 openshift_logging_mux_port: 24284
 # the namespace to use for undefined projects should come first, followed by any
 # additional namespaces to create by default - users will typically not need to set this
diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml
index f40beb79d..f271d8d7d 100644
--- a/roles/openshift_logging_mux/meta/main.yaml
+++ b/roles/openshift_logging_mux/meta/main.yaml
@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 359536202..e6b8b8ac8 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -5,7 +5,6 @@
     state: restarted
   when:
   - not (master_api_service_status_changed | default(false) | bool)
-  - openshift.master.cluster_method == 'native'
   notify:
   - Verify API Server
@@ -18,7 +17,6 @@
   until: result.rc == 0
   when:
   - not (master_controllers_service_status_changed | default(false) | bool)
-  - openshift.master.cluster_method == 'native'

 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index a1cda2ad4..bf0cbbf18 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -15,3 +15,4 @@ dependencies:
 - role: lib_openshift
 - role: lib_utils
 - role: lib_os_firewall
+- role: openshift_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index e52cd6231..5f4e6df71 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -11,25 +11,6 @@
   - openshift_master_oauth_grant_method is defined
   - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods

-# HA Variable Validation
-- fail:
-    msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
-  when:
-  - openshift.master.ha | bool
-  - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
-- fail:
-    msg: "openshift_master_cluster_password must be set for multi-master installations"
-  when:
-  - openshift.master.ha | bool
-  - openshift.master.cluster_method == "pacemaker"
-  - openshift_master_cluster_password is not defined or not openshift_master_cluster_password
-- fail:
-    msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
-  when:
-  - openshift.master.ha | bool
-  - openshift.master.cluster_method == "pacemaker"
-  - openshift.common.is_containerized | bool
-
 - name: Open up firewall ports
   import_tasks: firewall.yml

@@ -226,7 +207,6 @@
     enabled: yes
     state: started
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname == openshift_master_hosts[0]
   register: l_start_result
   until: not l_start_result | failed
@@ -241,14 +221,12 @@
 - set_fact:
     master_api_service_status_changed: "{{ l_start_result | changed }}"
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname == openshift_master_hosts[0]

 - pause:
     seconds: 15
   when:
   - openshift.master.ha | bool
-  - openshift.master.cluster_method == 'native'

 - name: Start and enable master api all masters
   systemd:
@@ -256,7 +234,6 @@
     enabled: yes
     state: started
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname != openshift_master_hosts[0]
   register: l_start_result
   until: not l_start_result | failed
@@ -271,14 +248,12 @@
 - set_fact:
     master_api_service_status_changed: "{{ l_start_result | changed }}"
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname != openshift_master_hosts[0]

 # A separate wait is required here for native HA since notifies will
 # be resolved after all tasks in the role.
 - include_tasks: check_master_api_is_ready.yml
   when:
-  - openshift.master.cluster_method == 'native'
   - master_api_service_status_changed | bool

 - name: Start and enable master controller service
@@ -286,8 +261,6 @@
     name: "{{ openshift.common.service_type }}-master-controllers"
     enabled: yes
     state: started
-  when:
-  - openshift.master.cluster_method == 'native'
   register: l_start_result
   until: not l_start_result | failed
   retries: 1
@@ -301,30 +274,6 @@
 - name: Set fact master_controllers_service_status_changed
   set_fact:
     master_controllers_service_status_changed: "{{ l_start_result | changed }}"
-  when:
-  - openshift.master.cluster_method == 'native'
-
-- name: Install cluster packages
-  package: name=pcs state=present
-  when:
-  - openshift.master.cluster_method == 'pacemaker'
-  - not openshift.common.is_containerized | bool
-  register: l_install_result
-  until: l_install_result | success
-
-- name: Start and enable cluster service
-  systemd:
-    name: pcsd
-    enabled: yes
-    state: started
-  when:
-  - openshift.master.cluster_method == 'pacemaker'
-  - not openshift.common.is_containerized | bool
-
-- name: Set the cluster user password
-  shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
-  when:
-  - l_install_result | changed

 - name: node bootstrap settings
   include_tasks: bootstrap.yml
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index c95f562d0..ca04d2243 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -33,7 +33,7 @@
   - openshift_docker_alternative_creds | default(False) | bool
   - oreg_auth_user is defined
   - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
-  register: master_oreg_auth_credentials_create
+  register: master_oreg_auth_credentials_create_alt
   notify:
   - restart master api
   - restart master controllers
@@ -45,4 +45,8 @@
   when:
   - openshift.common.is_containerized | bool
   - oreg_auth_user is defined
-  - (master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or master_oreg_auth_credentials_create.changed) | bool
+  - >
+    (master_oreg_auth_credentials_stat.stat.exists
+    or oreg_auth_credentials_replace
+    or master_oreg_auth_credentials_create.changed
+    or master_oreg_auth_credentials_create_alt.changed) | bool
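The registry_auth hunks above keep the two credential-creation paths in separate registered variables so the alternative-credentials task no longer clobbers the standard one. The general shape of that pattern, sketched with generic placeholder tasks rather than the role's actual ones:

```
- command: /bin/true          # standard credential path (placeholder action)
  register: creds_create
- command: /bin/true          # alternative credential path (placeholder action)
  register: creds_create_alt
- debug:
    msg: "react when either path reported a change"
  when: creds_create.changed or creds_create_alt.changed
```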
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 9d11ed574..ee76413e3 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -25,7 +25,6 @@
     state: absent
   ignore_errors: true
   when:
-  - openshift.master.cluster_method == "native"
   - not l_is_master_system_container | bool

 # This is the image used for both HA and non-HA clusters:
@@ -43,7 +42,6 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
     dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
   when:
-  - openshift.master.cluster_method == "native"
   - not l_is_master_system_container | bool
   with_items:
   - api
@@ -63,22 +61,17 @@
   - api
   - controllers
   when:
-  - openshift.master.cluster_method == "native"
   - not l_is_master_system_container | bool

 - name: Preserve Master API Proxy Config options
   command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
   register: l_master_api_proxy
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false

 - name: Preserve Master API AWS options
   command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
   register: master_api_aws
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false

@@ -87,14 +80,11 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
     backup: true
-  when:
-  - openshift.master.cluster_method == "native"
   notify:
   - restart master api

 - name: Restore Master API Proxy Config Options
   when:
-  - openshift.master.cluster_method == "native"
   - l_master_api_proxy.rc == 0
   - "'http_proxy' not in openshift.common"
   - "'https_proxy' not in openshift.common"
@@ -105,7 +95,6 @@

 - name: Restore Master API AWS Options
   when:
-  - openshift.master.cluster_method == "native"
   - master_api_aws.rc == 0
   - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
   lineinfile:
@@ -117,16 +106,12 @@
 - name: Preserve Master Controllers Proxy Config options
   command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
   register: master_controllers_proxy
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false

 - name: Preserve Master Controllers AWS options
   command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
   register: master_controllers_aws
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false

@@ -135,8 +120,6 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
     backup: true
-  when:
-  - openshift.master.cluster_method == "native"
   notify:
   - restart master controllers

@@ -146,7 +129,6 @@
     line: "{{ item }}"
   with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
   when:
-  - openshift.master.cluster_method == "native"
   - master_controllers_proxy.rc == 0
   - "'http_proxy' not in openshift.common"
   - "'https_proxy' not in openshift.common"
@@ -157,6 +139,5 @@
     line: "{{ item }}"
   with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
   when:
-  - openshift.master.cluster_method == "native"
   - master_controllers_aws.rc == 0
   - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index a0f00e545..92668b227 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -120,7 +120,7 @@ kubernetesMasterConfig:
   - application/vnd.kubernetes.protobuf
 {% endif %}
   controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
-  masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
+  masterCount: {{ openshift.master.master_count }}
   masterIP: {{ openshift.common.ip }}
   podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
   proxyClientInfo:
@@ -204,7 +204,7 @@ projectConfig:
   mcsLabelsPerProject: {{ osm_mcs_labels_per_project }}
   uidAllocatorRange: "{{ osm_uid_allocator_range }}"
 routingConfig:
-  subdomain: "{{ openshift_master_default_subdomain | default("") }}"
+  subdomain: "{{ openshift_master_default_subdomain }}"
 serviceAccountConfig:
   limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }}
   managedNames:
diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md
deleted file mode 100644
index 58dd19ac3..000000000
--- a/roles/openshift_master_cluster/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Master Cluster
-========================
-
-TODO
-
-Requirements
-------------
-
-* Ansible 2.2
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml
deleted file mode 100644
index c452b165e..000000000
--- a/roles/openshift_master_cluster/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
-  author: Jason DeTiberus
-  description:
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies: []
diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml
deleted file mode 100644
index 1b94598dd..000000000
--- a/roles/openshift_master_cluster/tasks/configure.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- fail:
-    msg: This role requires that openshift_master_cluster_vip is set
-  when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip
-- fail:
-    msg: This role requires that openshift_master_cluster_public_vip is set
-  when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip
-
-- name: Authenticate to the cluster
-  command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }}
-
-- name: Create the cluster
-  command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }}
-
-- name: Start the cluster
-  command: pcs cluster start --all
-
-- name: Enable the cluster on all nodes
-  command: pcs cluster enable --all
-
-- name: Set default resource stickiness
-  command: pcs resource defaults resource-stickiness=100
-
-- name: Add the cluster VIP resource
-  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master
-
-- name: Add the cluster public VIP resource
-  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master
-  when: openshift_master_cluster_public_vip != openshift_master_cluster_vip
-
-- name: Add the cluster master service resource
-  command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master
-
-- name: Disable stonith
-  command: pcs property set stonith-enabled=false
-
-- name: Wait for the clustered master service to be available
-  wait_for:
-    host: "{{ openshift_master_cluster_vip }}"
-    port: "{{ openshift.master.api_port }}"
-    state: started
-    timeout: 180
-    delay: 90
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
deleted file mode 100644
index 41bfc72cb..000000000
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- fail:
-    msg: "Not possible on atomic hosts for now"
-  when: openshift.common.is_containerized | bool
-
-- name: Test if cluster is already configured
-  command: pcs status
-  register: pcs_status
-  changed_when: false
-  failed_when: false
-  when: openshift.master.cluster_method == "pacemaker"
-
-- include_tasks: configure.yml
-  when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml
index d0dcdae4b..a89f48afa 100644
--- a/roles/openshift_master_facts/defaults/main.yml
+++ b/roles/openshift_master_facts/defaults/main.yml
@@ -1,5 +1,4 @@
 ---
-openshift_master_default_subdomain: "router.default.svc.cluster.local"
 openshift_master_admission_plugin_config:
   openshift.io/ImagePolicy:
     configuration:
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index c827f2d26..ff15f693b 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -485,31 +485,6 @@ class FilterModule(object):
                            Dumper=AnsibleDumper))

     @staticmethod
-    def validate_pcs_cluster(data, masters=None):
-        ''' Validates output from "pcs status", ensuring that each master
-            provided is online.
-            Ex: data = ('...',
-                        'PCSD Status:',
-                        'master1.example.com: Online',
-                        'master2.example.com: Online',
-                        'master3.example.com: Online',
-                        '...')
-                masters = ['master1.example.com',
-                           'master2.example.com',
-                           'master3.example.com']
-                returns True
-        '''
-        if not issubclass(type(data), string_types):
-            raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
-        if not issubclass(type(masters), list):
-            raise errors.AnsibleFilterError("|failed expects masters is a list")
-        valid = True
-        for master in masters:
-            if "{0}: Online".format(master) not in data:
-                valid = False
-        return valid
-
-    @staticmethod
     def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
         ''' Return certificates to synchronize based on facts. '''
         if not issubclass(type(hostvars), dict):
@@ -553,6 +528,5 @@ class FilterModule(object):
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {"translate_idps": self.translate_idps,
-                "validate_pcs_cluster": self.validate_pcs_cluster,
                 "certificates_to_synchronize": self.certificates_to_synchronize,
                 "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 20cc5358e..0cb87dcaa 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -1,14 +1,8 @@
 ---
-# Ensure the default sub-domain is set:
-- name: Migrate legacy osm_default_subdomain fact
-  set_fact:
-    openshift_master_default_subdomain: "{{ osm_default_subdomain | default(None) }}"
-  when: openshift_master_default_subdomain is not defined
-
 - name: Verify required variables are set
   fail:
     msg: openshift_master_default_subdomain must be set to deploy metrics
-  when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain | default("") == ""
+  when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain == ""

 # NOTE: These metrics variables are unfortunately needed by both the master and the metrics roles
 # to properly configure the master-config.yaml file.
@@ -20,7 +14,7 @@
 - name: Set g_metrics_hostname
   set_fact:
     g_metrics_hostname: "{{ openshift_hosted_metrics_public_url
-                            | default('hawkular-metrics.' ~ (openshift_master_default_subdomain))
+                            | default('hawkular-metrics.' ~ openshift_master_default_subdomain)
                             | oo_hostname_from_url }}"

 - set_fact:
@@ -31,7 +25,6 @@
   openshift_facts:
     role: master
     local_facts:
-      cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
       cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
       cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
       api_port: "{{ openshift_master_api_port | default(None) }}"
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index 074b72942..e0329ee7c 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: restart master api
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_api_service_status_changed | default(false) | bool))
   notify: Verify API Server

 # We retry the controllers because the API may not be 100% initialized yet.
@@ -11,7 +11,7 @@
   delay: 5
   register: result
   until: result.rc == 0
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_controllers_service_status_changed | default(false) | bool))

 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index f5428867a..ab43ec049 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -32,7 +32,7 @@
   - openshift_docker_alternative_creds | bool
   - oreg_auth_user is defined
   - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
-  register: node_oreg_auth_credentials_create
+  register: node_oreg_auth_credentials_create_alt
   notify:
   - restart node

@@ -43,4 +43,8 @@
   when:
   - openshift.common.is_containerized | bool
   - oreg_auth_user is defined
-  - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
+  - >
+    (node_oreg_auth_credentials_stat.stat.exists
+    or oreg_auth_credentials_replace
+    or node_oreg_auth_credentials_create.changed
+    or node_oreg_auth_credentials_create_alt.changed) | bool
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
new file mode 100644
index 000000000..8046aff23
--- /dev/null
+++ b/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
@@ -0,0 +1,159 @@
+"""
+Ansible action plugin to generate pv and pvc dictionaries lists
+"""
+
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+
+class ActionModule(ActionBase):
+    """Action plugin to execute health checks."""
+
+    def get_templated(self, var_to_template):
+        """Return a properly templated ansible variable"""
+        return self._templar.template(self.task_vars.get(var_to_template))
+
+    def build_common(self, varname=None):
+        """Retrieve common variables for each pv and pvc type"""
+        volume = self.get_templated(str(varname) + '_volume_name')
+        size = self.get_templated(str(varname) + '_volume_size')
+        labels = self.task_vars.get(str(varname) + '_labels')
+        if labels:
+            labels = self._templar.template(labels)
+        else:
+            labels = dict()
+        access_modes = self.get_templated(str(varname) + '_access_modes')
+        return (volume, size, labels, access_modes)
+
+    def build_pv_nfs(self, varname=None):
+        """Build pv dictionary for nfs storage type"""
+        host = self.task_vars.get(str(varname) + '_host')
+        if host:
+            self._templar.template(host)
+        elif host is None:
+            groups = self.task_vars.get('groups')
+            default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group')
+            if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0:
+                host = groups['oo_nfs_to_config'][0]
+            else:
+                raise errors.AnsibleModuleError("|failed no storage host detected")
+        volume, size, labels, access_modes = self.build_common(varname=varname)
+        directory = self.get_templated(str(varname) + '_nfs_directory')
+        path = directory + '/' + volume
+        return dict(
+            name="{0}-volume".format(volume),
+            capacity=size,
+            labels=labels,
+            access_modes=access_modes,
+            storage=dict(
+                nfs=dict(
+                    server=host,
+                    path=path)))
+
+    def build_pv_openstack(self, varname=None):
+        """Build pv dictionary for openstack storage type"""
+        volume, size, labels, access_modes = self.build_common(varname=varname)
+        filesystem = self.get_templated(str(varname) + '_openstack_filesystem')
+        volume_id = self.get_templated(str(varname) + '_openstack_volumeID')
+        return dict(
+            name="{0}-volume".format(volume),
+            capacity=size,
+            labels=labels,
+            access_modes=access_modes,
+            storage=dict(
+                cinder=dict(
+                    fsType=filesystem,
+                    volumeID=volume_id)))
+
+    def build_pv_glusterfs(self, varname=None):
+        """Build pv dictionary for glusterfs storage type"""
+        volume, size, labels, access_modes = self.build_common(varname=varname)
+        endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints')
+        path = self.get_templated(str(varname) + '_glusterfs_path')
+        read_only = self.get_templated(str(varname) + '_glusterfs_readOnly')
+        return dict(
+            name="{0}-volume".format(volume),
+            capacity=size,
+            labels=labels,
+            access_modes=access_modes,
+            storage=dict(
+                glusterfs=dict(
+                    endpoints=endpoints,
+                    path=path,
+                    readOnly=read_only)))
+
+    def build_pv_dict(self, varname=None):
+        """Check for the existence of PV variables"""
+        kind = self.task_vars.get(str(varname) + '_kind')
+        if kind:
+            kind = self._templar.template(kind)
+            create_pv = self.task_vars.get(str(varname) + '_create_pv')
+            if create_pv and self._templar.template(create_pv):
+                if kind == 'nfs':
+                    persistent_volume = self.build_pv_nfs(varname=varname)
+
+                elif kind == 'openstack':
+                    persistent_volume = self.build_pv_openstack(varname=varname)
+
+                elif kind == 'glusterfs':
+                    persistent_volume = self.build_pv_glusterfs(varname=varname)
+
+                elif not (kind == 'object' or kind == 'dynamic'):
+                    msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+                        kind,
+                        varname)
+                    raise errors.AnsibleModuleError(msg)
+
+                return persistent_volume
+        return None
+
+    def build_pvc_dict(self, varname=None):
+        """Check for the existence of PVC variables"""
+        kind = self.task_vars.get(str(varname) + '_kind')
+        if kind:
+            kind = self._templar.template(kind)
+            create_pv = self.task_vars.get(str(varname) + '_create_pv')
+            if create_pv:
+                create_pv = self._templar.template(create_pv)
+                create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
+                if create_pvc:
+                    create_pvc = self._templar.template(create_pvc)
+                    if kind != 'object' and create_pv and create_pvc:
+                        volume, size, _, access_modes = self.build_common(varname=varname)
+                        return dict(
+                            name="{0}-claim".format(volume),
+                            capacity=size,
+                            access_modes=access_modes)
+        return None
+
+    def run(self, tmp=None, task_vars=None):
+        """Run generate_pv_pvcs_list action plugin"""
+        result = super(ActionModule, self).run(tmp, task_vars)
+        # Ignore setting self.task_vars outside of init.
+        # pylint: disable=W0201
+        self.task_vars = task_vars or {}
+
+        result["changed"] = False
+        result["failed"] = False
+        result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
+        vars_to_check = ['openshift_hosted_registry_storage',
+                         'openshift_hosted_router_storage',
+                         'openshift_hosted_etcd_storage',
+                         'openshift_logging_storage',
+                         'openshift_loggingops_storage',
+                         'openshift_metrics_storage',
+                         'openshift_prometheus_storage',
+                         'openshift_prometheus_alertmanager_storage',
+                         'openshift_prometheus_alertbuffer_storage']
+        persistent_volumes = []
+        persistent_volume_claims = []
+        for varname in vars_to_check:
+            pv_dict = self.build_pv_dict(varname)
+            if pv_dict:
+                persistent_volumes.append(pv_dict)
+            pvc_dict = self.build_pvc_dict(varname)
+            if pvc_dict:
+                persistent_volume_claims.append(pvc_dict)
+        result["persistent_volumes"] = persistent_volumes
+        result["persistent_volume_claims"] = persistent_volume_claims
+        return result
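The plugin reads `<prefix>_kind`, `<prefix>_volume_name`, and related task variables for each prefix in `vars_to_check`, so a caller only needs the variables defined plus a bare invocation. A minimal playbook sketch, with illustrative NFS storage values:

```
- hosts: oo_first_master
  vars:
    openshift_metrics_storage_kind: nfs
    openshift_metrics_storage_create_pv: True
    openshift_metrics_storage_create_pvc: True
    openshift_metrics_storage_host: nfs.example.com        # illustrative
    openshift_metrics_storage_nfs_directory: /exports      # illustrative
    openshift_metrics_storage_volume_name: metrics
    openshift_metrics_storage_volume_size: 10Gi
    openshift_metrics_storage_access_modes: ['ReadWriteOnce']
  tasks:
  - generate_pv_pvcs_list: {}
    register: l_pv_pvcs_list
  - debug:
      var: l_pv_pvcs_list.persistent_volumes
```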
diff --git a/roles/openshift_persistent_volumes/defaults/main.yml b/roles/openshift_persistent_volumes/defaults/main.yml
new file mode 100644
index 000000000..b16e164e6
--- /dev/null
+++ b/roles/openshift_persistent_volumes/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+
+openshift_persistent_volumes_default_nfs_group: 'oo_nfs_to_config'
+
+openshift_persistent_volume_extras: []
+openshift_persistent_volume_claims_extras: []
+
+glusterfs_pv: []
+glusterfs_pvc: []
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 19e9a56b7..48b0699ab 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -9,4 +9,5 @@ galaxy_info:
   - name: EL
     versions:
     - 7
-dependencies: {}
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index e431e978c..0b4dd7d1f 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -9,39 +9,36 @@
     cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
   changed_when: False

-- name: Deploy PersistentVolume definitions
-  template:
-    dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
-    src: persistent-volume.yml.j2
-  when: persistent_volumes | length > 0
-  changed_when: False
+- set_fact:
+    glusterfs_pv:
+    - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-volume"
+      capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+      access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+      storage:
+        glusterfs:
+          endpoints: "{{ openshift_hosted_registry_storage_glusterfs_endpoints }}"
+          path: "{{ openshift_hosted_registry_storage_glusterfs_path }}"
+          readOnly: "{{ openshift_hosted_registry_storage_glusterfs_readOnly }}"
+    glusterfs_pvc:
+    - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
+      capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+      access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+  when: openshift_hosted_registry_storage_glusterfs_swap | default(False)

-- name: Create PersistentVolumes
-  command: >
-    {{ openshift.common.client_binary }} create
-    -f {{ mktemp.stdout }}/persistent-volumes.yml
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-  register: pv_create_output
-  when: persistent_volumes | length > 0
-  failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
-  changed_when: ('created' in pv_create_output.stdout)
+- name: create standard pv and pvc lists
+  # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+  generate_pv_pvcs_list: {}
+  register: l_pv_pvcs_list

-- name: Deploy PersistentVolumeClaim definitions
-  template:
-    dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
-    src: persistent-volume-claim.yml.j2
-  when: persistent_volume_claims | length > 0
-  changed_when: False
+- include_tasks: pv.yml
+  vars:
+    l_extra_persistent_volumes: "{{ openshift_persistent_volume_extras | union(glusterfs_pv) }}"
+    persistent_volumes: "{{ l_pv_pvcs_list.persistent_volumes | union(l_extra_persistent_volumes) }}"

-- name: Create PersistentVolumeClaims
-  command: >
-    {{ openshift.common.client_binary }} create
-    -f {{ mktemp.stdout }}/persistent-volume-claims.yml
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-  register: pvc_create_output
-  when: persistent_volume_claims | length > 0
-  failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
-  changed_when: ('created' in pvc_create_output.stdout)
+- include_tasks: pvc.yml
+  vars:
+    l_extra_persistent_volume_claims: "{{ openshift_persistent_volume_claims_extras | union(glusterfs_pvc) }}"
+    persistent_volume_claims: "{{ l_pv_pvcs_list.persistent_volume_claims | union(l_extra_persistent_volume_claims) }}"

 - name: Delete temp directory
   file:
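Because the include passes the plugin output through `union` with `openshift_persistent_volume_extras`, additional PVs can be appended from inventory in the same dictionary shape the plugin emits. A sketch with all values illustrative:

```
openshift_persistent_volume_extras:
- name: example-volume
  capacity: 5Gi
  access_modes: ['ReadWriteOnce']
  storage:
    nfs:
      server: nfs.example.com
      path: /exports/example
```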
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
new file mode 100644
index 000000000..346605ff7
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolume definitions
+  template:
+    dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
+    src: persistent-volume.yml.j2
+  when: persistent_volumes | length > 0
+  changed_when: False
+
+- name: Create PersistentVolumes
+  command: >
+    {{ openshift.common.client_binary }} create
+    -f {{ mktemp.stdout }}/persistent-volumes.yml
+    --config={{ mktemp.stdout }}/admin.kubeconfig
+  register: pv_create_output
+  when: persistent_volumes | length > 0
+  failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+  changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
new file mode 100644
index 000000000..e44f9b18f
--- /dev/null
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolumeClaim definitions
+  template:
+    dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
+    src: persistent-volume-claim.yml.j2
+  when: persistent_volume_claims | length > 0
+  changed_when: False
+
+- name: Create PersistentVolumeClaims
+  command: >
+    {{ openshift.common.client_binary }} create
+    -f {{ mktemp.stdout }}/persistent-volume-claims.yml
+    --config={{ mktemp.stdout }}/admin.kubeconfig
+  register: pvc_create_output
+  when: persistent_volume_claims | length > 0
+  failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+  changed_when: ('created' in pvc_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index ee9dac7cb..9ec14208b 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -17,5 +17,5 @@ items:
     capacity:
       storage: "{{ volume.capacity }}"
     accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }}
-    {{ volume.storage.keys()[0] }}: {{ volume.storage[volume.storage.keys()[0]] | to_padded_yaml(3, 2) }}
+    {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }}
 {% endfor %}
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 814d6ff28..b7b3c0db2 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -45,7 +45,7 @@ openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if
 openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"

 openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
-openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift_hosted_registry_namespace | default(openshift_storage_glusterfs_namespace) }}"
 openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
 openshift_storage_glusterfs_registry_name: 'registry'
 openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index d3cba61cf..fa50e39a2 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -56,5 +56,5 @@
   register: registry_volume

 - name: Create GlusterFS registry volume
-  command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
-  when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
+  command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift_hosted_registry_storage_volume_size | replace('Gi','') }} --name={{ openshift_hosted_registry_storage_glusterfs_path }}"
+  when: "openshift_hosted_registry_storage_glusterfs_path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index d2d8c6c10..728f15a21 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -10,8 +10,10 @@
   - groups.glusterfs | default([]) | count > 0

 - include: glusterfs_registry.yml
-  when:
-  - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
+  when: >
+    groups.glusterfs_registry | default([]) | count > 0
+    or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
+    or (openshift_hosted_registry_storage_glusterfs_swap | default(False))

 - name: Delete temp directory
   file:
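The persistent-volume.yml.j2 hunk above is a Python 3 compatibility fix: `dict.keys()` returns a non-indexable view there, so the key must be cast to a list before subscripting. A self-contained task demonstrating the portable form, with an illustrative one-key storage dict as in the template:

```
- debug:
    msg: "{{ (volume.storage.keys() | list)[0] }}"   # prints "nfs" on Python 2 and 3
  vars:
    volume:
      storage:
        nfs:
          server: nfs.example.com                    # illustrative
```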
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index c25cad74c..55e4024ec 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -20,25 +20,25 @@

 - name: Ensure exports directory exists
   file:
-    path: "{{ openshift.hosted.registry.storage.nfs.directory }}"
+    path: "{{ openshift_hosted_registry_storage_nfs_directory }}"
     state: directory

 - name: Ensure export directories exist
   file:
-    path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}"
+    path: "{{ item }}"
     state: directory
     mode: 0777
     owner: nfsnobody
     group: nfsnobody
   with_items:
-  - "{{ openshift.hosted.registry }}"
-  - "{{ openshift.metrics }}"
-  - "{{ openshift.logging }}"
-  - "{{ openshift.loggingops }}"
-  - "{{ openshift.hosted.etcd }}"
-  - "{{ openshift.prometheus }}"
-  - "{{ openshift.prometheus.alertmanager }}"
-  - "{{ openshift.prometheus.alertbuffer }}"
+  - "{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}"
+  - "{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }}"
+  - "{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }}"
+  - "{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }}"
+  - "{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }}"
+  - "{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }}"
+  - "{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }}"
+  - "{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}"

 - name: Configure exports
   template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index c2a741035..2ec8db019 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,8 +1,8 @@
-{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
-{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
-{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
-{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
-{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
-{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
-{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
+{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }} {{ openshift_hosted_registry_storage_nfs_options }}
+{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }} {{ openshift_metrics_storage_nfs_options }}
+{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }} {{ openshift_logging_storage_nfs_options }}
+{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }} {{ openshift_loggingops_storage_nfs_options }}
+{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }} {{ openshift_hosted_etcd_storage_nfs_options }}
+{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }} {{ openshift_prometheus_storage_nfs_options }}
+{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }} {{ openshift_prometheus_alertmanager_storage_nfs_options }}
+{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }} {{ openshift_prometheus_alertbuffer_storage_nfs_options }}
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 1e2af2c61..dda8eb4c6 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -125,7 +125,6 @@ def write_inventory_vars(base_inventory, lb):
         base_inventory.write('openshift_override_hostname_check=true\n')

     if lb is not None:
-        base_inventory.write('openshift_master_cluster_method=native\n')
         base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
         base_inventory.write(
             "openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
@@ -266,7 +265,6 @@ def default_facts(hosts, verbose=False):
     facts_env = os.environ.copy()
     facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
     facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
-    facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
    if 'ansible_log_path' in CFG.settings:
        facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
    if 'ansible_config' in CFG.settings:
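With `openshift_master_cluster_method` no longer written, native HA is the assumed method and only the load-balancer hostnames remain. An illustrative fragment of the inventory lines `write_inventory_vars` now emits when a load balancer is configured (hostnames are placeholders):

```
openshift_master_cluster_hostname=lb-internal.example.com
openshift_master_cluster_public_hostname=lb.example.com
```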