author     Scott Dodson <sdodson@redhat.com>         2017-10-18 07:47:40 -0400
committer  GitHub <noreply@github.com>               2017-10-18 07:47:40 -0400
commit     5ff1e1bec64ea99830003a7f7d44de0ac1e65a57 (patch)
tree       2b8d7ea06be949aadab22b7a09db35263ad7c28b
parent     e007bc8a9b397331f008586dcc3b2ee7d29978ad (diff)
parent     418b742c365ef8dce4e14f9486ea658495029df3 (diff)
Merge pull request #5459 from zgalor/nfs_fix
Fix prometheus role nfs
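The heart of this merge is a refactor of the persistent-volume filters: oo_component_persistent_volumes() and oo_component_pv_claims() gain an optional subcomponent argument so the prometheus role's alertmanager and alertbuffer storage facts can be resolved, and 'create_pv' is now tested for membership before being read. The following is a minimal, hypothetical sketch of that lookup pattern — not code from this repository; the real filter (see the diff below) also handles the openstack and glusterfs kinds.

# Hypothetical sketch (not from this repo) of the subcomponent-aware
# lookup pattern this merge adds to filter_plugins/oo_filters.py.

def component_storage(hostvars, component, subcomponent=None):
    """Resolve openshift.<component>[.<subcomponent>].storage, or None."""
    node = hostvars['openshift'][component]
    if subcomponent is not None:
        node = node[subcomponent]      # e.g. 'prometheus' -> 'alertmanager'
    return node.get('storage')


def build_nfs_pv(params, nfs_host):
    """Build an NFS persistent-volume dict when create_pv is set."""
    # Storage facts may omit 'create_pv' entirely, so the key is tested
    # rather than indexed -- the guard this merge adds.
    if params is None or not params.get('create_pv'):
        return None
    volume = params['volume']['name']
    return dict(
        name="{0}-volume".format(volume),
        capacity=params['volume']['size'],
        labels=params.get('labels', dict()),
        access_modes=params['access']['modes'],
        storage=dict(nfs=dict(server=nfs_host,
                              path=params['nfs']['directory'] + '/' + volume)))

Testing membership rather than indexing is what lets subcomponents that define no storage, or storage without create_pv, pass through silently instead of failing the play.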
-rw-r--r--   filter_plugins/oo_filters.py                                        324
-rw-r--r--   inventory/byo/hosts.example                                          65
-rw-r--r--   inventory/byo/hosts.origin.example                                  900
-rw-r--r--   playbooks/common/openshift-cluster/create_persistent_volumes.yml      9
-rwxr-xr-x   roles/openshift_facts/library/openshift_facts.py                     77
-rw-r--r--   roles/openshift_hosted_facts/tasks/main.yml                            2
-rw-r--r--   roles/openshift_prometheus/defaults/main.yaml                         34
-rw-r--r--   roles/openshift_prometheus/files/openshift_prometheus.exports         3
-rw-r--r--   roles/openshift_prometheus/tasks/create_pvs.yaml                      36
-rw-r--r--   roles/openshift_prometheus/tasks/install_prometheus.yaml               9
-rw-r--r--   roles/openshift_prometheus/tasks/nfs.yaml                             44
-rw-r--r--   roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2       15
-rw-r--r--   roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2      15
-rw-r--r--   roles/openshift_prometheus/templates/prom-pv-server.yml.j2            15
-rw-r--r--   roles/openshift_storage_nfs/tasks/main.yml                             3
-rw-r--r--   roles/openshift_storage_nfs/templates/exports.j2                       3
16 files changed, 1201 insertions(+), 353 deletions(-)
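As a hypothetical illustration of what the refactored filters produce, here is the sketch above driven by facts shaped like the ones the new openshift_prometheus_* inventory variables generate (every name, size, and host below is made up):

hostvars = {'openshift': {'prometheus': {
    'storage': {'kind': 'nfs', 'create_pv': True,
                'volume': {'name': 'prometheus', 'size': '10Gi'},
                'access': {'modes': ['ReadWriteOnce']},
                'nfs': {'directory': '/exports'}},
    'alertmanager': {'storage': {'kind': 'nfs', 'create_pv': True,
                                 'volume': {'name': 'prometheus-alertmanager',
                                            'size': '10Gi'},
                                 'access': {'modes': ['ReadWriteOnce']},
                                 'nfs': {'directory': '/exports'}}},
    'alertbuffer': {'storage': {'kind': 'nfs'}},  # no create_pv -> no PV, no KeyError
}}}

for sub in (None, 'alertmanager', 'alertbuffer'):
    pv = build_nfs_pv(component_storage(hostvars, 'prometheus', sub),
                      nfs_host='nfs.example.com')
    print(sub or 'prometheus', '->', pv['name'] if pv else 'no PV')
# prometheus -> prometheus-volume
# alertmanager -> prometheus-alertmanager-volume
# alertbuffer -> no PV

The per-component filter previously read params['create_pv'] unconditionally, so storage facts without that key (as in the alertbuffer entry above) raised a KeyError instead of simply producing no PV.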
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 83a05370a..2fbd23450 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -710,8 +710,8 @@ def oo_openshift_env(hostvars):
     return facts


-# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
-def oo_component_persistent_volumes(hostvars, groups, component):
+# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements, too-many-locals
+def oo_component_persistent_volumes(hostvars, groups, component, subcomponent=None):
     """ Generate list of persistent volumes based on oo_openshift_env
         storage options set in host variables for a specific component.
     """
@@ -723,84 +723,90 @@ def oo_component_persistent_volumes(hostvars, groups, component):
     persistent_volume = None

     if component in hostvars['openshift']:
-        if 'storage' in hostvars['openshift'][component]:
-            params = hostvars['openshift'][component]['storage']
+        if subcomponent is not None:
+            storage_component = hostvars['openshift'][component][subcomponent]
+        else:
+            storage_component = hostvars['openshift'][component]
+
+        if 'storage' in storage_component:
+            params = storage_component['storage']
             kind = params['kind']
-            create_pv = params['create_pv']
-            if kind is not None and create_pv:
-                if kind == 'nfs':
-                    host = params['host']
-                    if host is None:
-                        if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
-                            host = groups['oo_nfs_to_config'][0]
+            if 'create_pv' in params:
+                create_pv = params['create_pv']
+                if kind is not None and create_pv:
+                    if kind == 'nfs':
+                        host = params['host']
+                        if host is None:
+                            if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+                                host = groups['oo_nfs_to_config'][0]
+                            else:
+                                raise errors.AnsibleFilterError("|failed no storage host detected")
+                        directory = params['nfs']['directory']
+                        volume = params['volume']['name']
+                        path = directory + '/' + volume
+                        size = params['volume']['size']
+                        if 'labels' in params:
+                            labels = params['labels']
                         else:
-                            raise errors.AnsibleFilterError("|failed no storage host detected")
-                    directory = params['nfs']['directory']
-                    volume = params['volume']['name']
-                    path = directory + '/' + volume
-                    size = params['volume']['size']
-                    if 'labels' in params:
-                        labels = params['labels']
-                    else:
-                        labels = dict()
-                    access_modes = params['access']['modes']
-                    persistent_volume = dict(
-                        name="{0}-volume".format(volume),
-                        capacity=size,
-                        labels=labels,
-                        access_modes=access_modes,
-                        storage=dict(
-                            nfs=dict(
-                                server=host,
-                                path=path)))
-
-                elif kind == 'openstack':
-                    volume = params['volume']['name']
-                    size = params['volume']['size']
-                    if 'labels' in params:
-                        labels = params['labels']
-                    else:
-                        labels = dict()
-                    access_modes = params['access']['modes']
-                    filesystem = params['openstack']['filesystem']
-                    volume_id = params['openstack']['volumeID']
-                    persistent_volume = dict(
-                        name="{0}-volume".format(volume),
-                        capacity=size,
-                        labels=labels,
-                        access_modes=access_modes,
-                        storage=dict(
-                            cinder=dict(
-                                fsType=filesystem,
-                                volumeID=volume_id)))
-
-                elif kind == 'glusterfs':
-                    volume = params['volume']['name']
-                    size = params['volume']['size']
-                    if 'labels' in params:
-                        labels = params['labels']
-                    else:
-                        labels = dict()
-                    access_modes = params['access']['modes']
-                    endpoints = params['glusterfs']['endpoints']
-                    path = params['glusterfs']['path']
-                    read_only = params['glusterfs']['readOnly']
-                    persistent_volume = dict(
-                        name="{0}-volume".format(volume),
-                        capacity=size,
-                        labels=labels,
-                        access_modes=access_modes,
-                        storage=dict(
-                            glusterfs=dict(
-                                endpoints=endpoints,
-                                path=path,
-                                readOnly=read_only)))
-
-                elif not (kind == 'object' or kind == 'dynamic'):
-                    msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
-                        kind,
-                        component)
-                    raise errors.AnsibleFilterError(msg)
+                            labels = dict()
+                        access_modes = params['access']['modes']
+                        persistent_volume = dict(
+                            name="{0}-volume".format(volume),
+                            capacity=size,
+                            labels=labels,
+                            access_modes=access_modes,
+                            storage=dict(
+                                nfs=dict(
+                                    server=host,
+                                    path=path)))
+
+                    elif kind == 'openstack':
+                        volume = params['volume']['name']
+                        size = params['volume']['size']
+                        if 'labels' in params:
+                            labels = params['labels']
+                        else:
+                            labels = dict()
+                        access_modes = params['access']['modes']
+                        filesystem = params['openstack']['filesystem']
+                        volume_id = params['openstack']['volumeID']
+                        persistent_volume = dict(
+                            name="{0}-volume".format(volume),
+                            capacity=size,
+                            labels=labels,
+                            access_modes=access_modes,
+                            storage=dict(
+                                cinder=dict(
+                                    fsType=filesystem,
+                                    volumeID=volume_id)))
+
+                    elif kind == 'glusterfs':
+                        volume = params['volume']['name']
+                        size = params['volume']['size']
+                        if 'labels' in params:
+                            labels = params['labels']
+                        else:
+                            labels = dict()
+                        access_modes = params['access']['modes']
+                        endpoints = params['glusterfs']['endpoints']
+                        path = params['glusterfs']['path']
+                        read_only = params['glusterfs']['readOnly']
+                        persistent_volume = dict(
+                            name="{0}-volume".format(volume),
+                            capacity=size,
+                            labels=labels,
+                            access_modes=access_modes,
+                            storage=dict(
+                                glusterfs=dict(
+                                    endpoints=endpoints,
+                                    path=path,
+                                    readOnly=read_only)))
+
+                    elif not (kind == 'object' or kind == 'dynamic'):
+                        msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+                            kind,
+                            component)
+                        raise errors.AnsibleFilterError(msg)

     return persistent_volume
@@ -820,85 +826,10 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
         persistent_volumes = []
     if 'hosted' in hostvars['openshift']:
         for component in hostvars['openshift']['hosted']:
-            if 'storage' in hostvars['openshift']['hosted'][component]:
-                params = hostvars['openshift']['hosted'][component]['storage']
-                kind = params['kind']
-                if 'create_pv' in params:
-                    create_pv = params['create_pv']
-                    if kind is not None and create_pv:
-                        if kind == 'nfs':
-                            host = params['host']
-                            if host is None:
-                                if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
-                                    host = groups['oo_nfs_to_config'][0]
-                                else:
-                                    raise errors.AnsibleFilterError("|failed no storage host detected")
-                            directory = params['nfs']['directory']
-                            volume = params['volume']['name']
-                            path = directory + '/' + volume
-                            size = params['volume']['size']
-                            if 'labels' in params:
-                                labels = params['labels']
-                            else:
-                                labels = dict()
-                            access_modes = params['access']['modes']
-                            persistent_volume = dict(
-                                name="{0}-volume".format(volume),
-                                capacity=size,
-                                labels=labels,
-                                access_modes=access_modes,
-                                storage=dict(
-                                    nfs=dict(
-                                        server=host,
-                                        path=path)))
-                            persistent_volumes.append(persistent_volume)
-                        elif kind == 'openstack':
-                            volume = params['volume']['name']
-                            size = params['volume']['size']
-                            if 'labels' in params:
-                                labels = params['labels']
-                            else:
-                                labels = dict()
-                            access_modes = params['access']['modes']
-                            filesystem = params['openstack']['filesystem']
-                            volume_id = params['openstack']['volumeID']
-                            persistent_volume = dict(
-                                name="{0}-volume".format(volume),
-                                capacity=size,
-                                labels=labels,
-                                access_modes=access_modes,
-                                storage=dict(
-                                    cinder=dict(
-                                        fsType=filesystem,
-                                        volumeID=volume_id)))
-                            persistent_volumes.append(persistent_volume)
-                        elif kind == 'glusterfs':
-                            volume = params['volume']['name']
-                            size = params['volume']['size']
-                            if 'labels' in params:
-                                labels = params['labels']
-                            else:
-                                labels = dict()
-                            access_modes = params['access']['modes']
-                            endpoints = params['glusterfs']['endpoints']
-                            path = params['glusterfs']['path']
-                            read_only = params['glusterfs']['readOnly']
-                            persistent_volume = dict(
-                                name="{0}-volume".format(volume),
-                                capacity=size,
-                                labels=labels,
-                                access_modes=access_modes,
-                                storage=dict(
-                                    glusterfs=dict(
-                                        endpoints=endpoints,
-                                        path=path,
-                                        readOnly=read_only)))
-                            persistent_volumes.append(persistent_volume)
-                        elif not (kind == 'object' or kind == 'dynamic'):
-                            msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
-                                kind,
-                                component)
-                            raise errors.AnsibleFilterError(msg)
+            persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'hosted', component)
+            if persistent_volume is not None:
+                persistent_volumes.append(persistent_volume)
+
     if 'logging' in hostvars['openshift']:
         persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging')
         if persistent_volume is not None:
@@ -911,10 +842,22 @@
         persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics')
         if persistent_volume is not None:
             persistent_volumes.append(persistent_volume)
+    if 'prometheus' in hostvars['openshift']:
+        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus')
+        if persistent_volume is not None:
+            persistent_volumes.append(persistent_volume)
+        if 'alertmanager' in hostvars['openshift']['prometheus']:
+            persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertmanager')
+            if persistent_volume is not None:
+                persistent_volumes.append(persistent_volume)
+        if 'alertbuffer' in hostvars['openshift']['prometheus']:
+            persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertbuffer')
+            if persistent_volume is not None:
+                persistent_volumes.append(persistent_volume)
     return persistent_volumes


-def oo_component_pv_claims(hostvars, component):
+def oo_component_pv_claims(hostvars, component, subcomponent=None):
     """ Generate list of persistent volume claims based on oo_openshift_env
         storage options set in host variables for a speicific component.
     """
@@ -922,20 +865,27 @@
         raise errors.AnsibleFilterError("|failed expects hostvars is a dict")

     if component in hostvars['openshift']:
-        if 'storage' in hostvars['openshift'][component]:
-            params = hostvars['openshift'][component]['storage']
+        if subcomponent is not None:
+            storage_component = hostvars['openshift'][component][subcomponent]
+        else:
+            storage_component = hostvars['openshift'][component]
+
+        if 'storage' in storage_component:
+            params = storage_component['storage']
             kind = params['kind']
-            create_pv = params['create_pv']
-            create_pvc = params['create_pvc']
-            if kind not in [None, 'object'] and create_pv and create_pvc:
-                volume = params['volume']['name']
-                size = params['volume']['size']
-                access_modes = params['access']['modes']
-                persistent_volume_claim = dict(
-                    name="{0}-claim".format(volume),
-                    capacity=size,
-                    access_modes=access_modes)
-                return persistent_volume_claim
+            if 'create_pv' in params:
+                if 'create_pvc' in params:
+                    create_pv = params['create_pv']
+                    create_pvc = params['create_pvc']
+                    if kind not in [None, 'object'] and create_pv and create_pvc:
+                        volume = params['volume']['name']
+                        size = params['volume']['size']
+                        access_modes = params['access']['modes']
+                        persistent_volume_claim = dict(
+                            name="{0}-claim".format(volume),
+                            capacity=size,
+                            access_modes=access_modes)
+                        return persistent_volume_claim

     return None
@@ -952,22 +902,10 @@
         persistent_volume_claims = []
     if 'hosted' in hostvars['openshift']:
         for component in hostvars['openshift']['hosted']:
-            if 'storage' in hostvars['openshift']['hosted'][component]:
-                params = hostvars['openshift']['hosted'][component]['storage']
-                kind = params['kind']
-                if 'create_pv' in params:
-                    if 'create_pvc' in params:
-                        create_pv = params['create_pv']
-                        create_pvc = params['create_pvc']
-                        if kind not in [None, 'object'] and create_pv and create_pvc:
-                            volume = params['volume']['name']
-                            size = params['volume']['size']
-                            access_modes = params['access']['modes']
-                            persistent_volume_claim = dict(
-                                name="{0}-claim".format(volume),
-                                capacity=size,
-                                access_modes=access_modes)
-                            persistent_volume_claims.append(persistent_volume_claim)
+            persistent_volume_claim = oo_component_pv_claims(hostvars, 'hosted', component)
+            if persistent_volume_claim is not None:
+                persistent_volume_claims.append(persistent_volume_claim)
+
     if 'logging' in hostvars['openshift']:
         persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging')
         if persistent_volume_claim is not None:
@@ -980,6 +918,18 @@
         persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics')
         if persistent_volume_claim is not None:
             persistent_volume_claims.append(persistent_volume_claim)
+    if 'prometheus' in hostvars['openshift']:
+        persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus')
+        if persistent_volume_claim is not None:
+            persistent_volume_claims.append(persistent_volume_claim)
+        if 'alertmanager' in hostvars['openshift']['prometheus']:
+            persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertmanager')
+            if persistent_volume_claim is not None:
+                persistent_volume_claims.append(persistent_volume_claim)
+        if 'alertbuffer' in hostvars['openshift']['prometheus']:
+            persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertbuffer')
+            if persistent_volume_claim is not None:
+                persistent_volume_claims.append(persistent_volume_claim)

     return persistent_volume_claims
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 436135bcf..499a9d8e7 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -615,6 +615,71 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
 #openshift_logging_image_version=3.7.0

+# Prometheus deployment
+#
+# Currently prometheus deployment is disabled by default, enable it by setting this
+#openshift_hosted_prometheus_deploy=true
+#
+# Prometheus storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
For example, the remote volume path using these +# options would be "nfs.example.com:/exports/prometheus" +#openshift_prometheus_storage_kind=nfs +#openshift_prometheus_storage_access_modes=['ReadWriteOnce'] +#openshift_prometheus_storage_host=nfs.example.com +#openshift_prometheus_storage_nfs_directory=/exports +#openshift_prometheus_storage_volume_name=prometheus +#openshift_prometheus_storage_volume_size=10Gi +#openshift_prometheus_storage_labels={'storage': 'prometheus'} +# For prometheus-alertmanager +#openshift_prometheus_alertmanager_storage_kind=nfs +#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] +#openshift_prometheus_alertmanager_storage_host=nfs.example.com +#openshift_prometheus_alertmanager_storage_nfs_directory=/exports +#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager +#openshift_prometheus_alertmanager_storage_volume_size=10Gi +#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} +# For prometheus-alertbuffer +#openshift_prometheus_alertbuffer_storage_kind=nfs +#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] +#openshift_prometheus_alertbuffer_storage_host=nfs.example.com +#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports +#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer +#openshift_prometheus_alertbuffer_storage_volume_size=10Gi +#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} +# +# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes +# which are destroyed when pods are deleted + # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example new file mode 100644 index 000000000..9d811fcab --- /dev/null +++ b/inventory/byo/hosts.origin.example @@ -0,0 +1,900 @@ +# This is an example of a bring your own (byo) host inventory + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +lb +nfs + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# Enable unsupported configurations, things that will yield a partially +# functioning cluster but would not be supported for production use +#openshift_enable_unsupported_configurations=false + +# SSH user, this user should allow ssh based auth without requiring a +# password. If using ssh key based auth, then the key should be managed by an +# ssh agent. +ansible_ssh_user=root + +# If ansible_ssh_user is not root, ansible_become must be set to true and the +# user must be configured for passwordless sudo +#ansible_become=yes + +# Debug level for all OpenShift components (Defaults to 2) +debug_level=2 + +# Specify the deployment type. Valid values are origin and openshift-enterprise. +openshift_deployment_type=origin + +# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we +# rely on the version running on the first master. Works best for containerized installs where we can usually +# use this to lookup the latest exact version of the container images, which is the tag actually used to configure +# the cluster. For RPM installations we just verify the version detected in your configured repos matches this +# release. +openshift_release=v3.7 + +# Specify an exact container image tag to install or configure. 
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. +# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. +#openshift_image_tag=v3.7.0 + +# Specify an exact rpm version to install or configure. +# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. +# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. +#openshift_pkg_version=-3.7.0 + +# This enables all the system containers except for docker: +#openshift_use_system_containers=False +# +# But you can choose separately each component that must be a +# system container: +# +#openshift_use_openvswitch_system_container=False +#openshift_use_node_system_container=False +#openshift_use_master_system_container=False +#openshift_use_etcd_system_container=False +# +# In either case, system_images_registry must be specified to be able to find the system images +#system_images_registry="docker.io" + +# Install the openshift examples +#openshift_install_examples=true + +# Configure logoutURL in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url +#openshift_master_logout_url=http://example.com + +# Configure extensionScripts in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] + +# Configure extensionStylesheets in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_template=/path/to/login-template.html + +# Configure imagePolicyConfig in the master config +# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig +#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} + +# Configure master API rate limits for external clients +#openshift_master_external_ratelimit_qps=200 +#openshift_master_external_ratelimit_burst=400 +# Configure master API rate limits for loopback clients +#openshift_master_loopback_ratelimit_qps=300 +#openshift_master_loopback_ratelimit_burst=600 + +# Docker Configuration +# Add additional, insecure, and blocked registries to global docker configuration +# For enterprise deployment types we ensure that registry.access.redhat.com is +# included if you do not include it +#openshift_docker_additional_registries=registry.example.com +#openshift_docker_insecure_registries=registry.example.com +#openshift_docker_blocked_registries=registry.hacker.com +# Disable pushing to 
dockerhub +#openshift_docker_disable_push_dockerhub=True +# Use Docker inside a System Container. Note that this is a tech preview and should +# not be used to upgrade! +# The following options for docker are ignored: +# - docker_version +# - docker_upgrade +# The following options must not be used +# - openshift_docker_options +#openshift_docker_use_system_container=False +# Instead of using docker, replacec it with cri-o +# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as it's override +# just as container-engine does. +#openshift_use_crio=False +# Force the registry to use for the docker/crio system container. By default the registry +# will be built off of the deployment type and ansible_distribution. Only +# use this option if you are sure you know what you are doing! +#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" +#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest" +# Items added, as is, to end of /etc/sysconfig/docker OPTIONS +# Default value: "--log-driver=journald" +#openshift_docker_options="-l warn --ipv6=false" + +# Specify exact version of Docker to configure or upgrade to. +# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10. +# docker_version="1.12.1" + +# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True. +# Uncomment below to disable; for example if your kernel does not support the +# Docker overlay/overlay2 storage drivers with SELinux enabled. +#openshift_docker_selinux_enabled=False + +# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. +# docker_upgrade=False + +# Specify exact version of etcd to configure or upgrade to. +# etcd_version="3.1.0" +# Enable etcd debug logging, defaults to false +# etcd_debug=true +# Set etcd log levels by package +# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG" + +# Upgrade Hooks +# +# Hooks are available to run custom tasks at various points during a cluster +# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using +# absolute paths, if not the path will be treated as relative to the file where the +# hook is actually used. +# +# Tasks to run before each master is upgraded. +# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml +# +# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible +# upgrade steps, but before we restart system/services. +# openshift_master_upgrade_hook=/usr/share/custom/master.yml +# +# Tasks to run after each master is upgraded and system/services have been restarted. 
+# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml + + +# Alternate image format string, useful if you've got your own registry mirror +# Configure this setting just on node or master +#oreg_url_master=example.com/openshift3/ose-${component}:${version} +#oreg_url_node=example.com/openshift3/ose-${component}:${version} +# For setting the configuration globally +#oreg_url=example.com/openshift3/ose-${component}:${version} +# If oreg_url points to a registry other than registry.access.redhat.com we can +# modify image streams to point at that registry by setting the following to true +#openshift_examples_modify_imagestreams=true + +# OpenShift repository configuration +#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] +#openshift_repos_enable_testing=false + +# htpasswd auth +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] +# Defining htpasswd users +#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'} +# or +#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file> + +# Allow all auth +#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] + +# LDAP auth +#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] +# +# Configure LDAP CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "ca" key set +# within the LDAPPasswordIdentityProvider. +# +#openshift_master_ldap_ca=<ca text> +# or +#openshift_master_ldap_ca_file=<path to local ca file to use> + +# OpenID auth +#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}] +# +# Configure OpenID CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "ca" key set +# within the OpenIDIdentityProvider. 
+# +#openshift_master_openid_ca=<ca text> +# or +#openshift_master_openid_ca_file=<path to local ca file to use> + +# Request header auth +#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] +# +# Configure request header CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "clientCA" +# key set within the RequestHeaderIdentityProvider. +# +#openshift_master_request_header_ca=<ca text> +# or +#openshift_master_request_header_ca_file=<path to local ca file to use> + +# CloudForms Management Engine (ManageIQ) App Install +# +# Enables installation of MIQ server. Recommended for dedicated +# clusters only. See roles/openshift_cfme/README.md for instructions +# and requirements. +#openshift_cfme_install_app=False + +# Cloud Provider Configuration +# +# Note: You may make use of environment variables rather than store +# sensitive configuration within the ansible inventory. +# For example: +#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}" +#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}" +# +# AWS +#openshift_cloudprovider_kind=aws +# Note: IAM profiles may be used instead of storing API credentials on disk. +#openshift_cloudprovider_aws_access_key=aws_access_key_id +#openshift_cloudprovider_aws_secret_key=aws_secret_access_key +# +# Openstack +#openshift_cloudprovider_kind=openstack +#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ +#openshift_cloudprovider_openstack_username=username +#openshift_cloudprovider_openstack_password=password +#openshift_cloudprovider_openstack_domain_id=domain_id +#openshift_cloudprovider_openstack_domain_name=domain_name +#openshift_cloudprovider_openstack_tenant_id=tenant_id +#openshift_cloudprovider_openstack_tenant_name=tenant_name +#openshift_cloudprovider_openstack_region=region +#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id +# +# GCE +#openshift_cloudprovider_kind=gce + +# Project Configuration +#osm_project_request_message='' +#osm_project_request_template='' +#osm_mcs_allocator_range='s0:/2' +#osm_mcs_labels_per_project=5 +#osm_uid_allocator_range='1000000000-1999999999/10000' + +# Configure additional projects +#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} + +# Enable cockpit +#osm_use_cockpit=true +# +# Set cockpit plugins +#osm_cockpit_plugins=['cockpit-kubernetes'] + +# Native high availability cluster method with optional load balancer. +# If no lb group is defined, the installer assumes that a load balancer has +# been preconfigured. For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. 
+#openshift_master_cluster_method=native +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Pacemaker high availability cluster method. +# Pacemaker HA environment must be able to self provision the +# configured VIP. For installation openshift_master_cluster_hostname +# must resolve to the configured VIP. +#openshift_master_cluster_method=pacemaker +#openshift_master_cluster_password=openshift_cluster +#openshift_master_cluster_vip=192.168.133.25 +#openshift_master_cluster_public_vip=192.168.133.25 +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Override the default controller lease ttl +#osm_controller_lease_ttl=30 + +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + +# default subdomain to use for exposed routes +#openshift_master_default_subdomain=apps.test.example.com + +# additional cors origins +#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] + +# default project node selector +#osm_default_node_selector='region=primary' + +# Override the default pod eviction timeout +#openshift_master_pod_eviction_timeout=5m + +# Override the default oauth tokenConfig settings: +# openshift_master_access_token_max_seconds=86400 +# openshift_master_auth_token_max_seconds=500 + +# Override master servingInfo.maxRequestsInFlight +#openshift_master_max_requests_inflight=500 + +# Override master and node servingInfo.minTLSVersion and .cipherSuites +# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 +# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants +#openshift_master_min_tls_version=VersionTLS12 +#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] +# +#openshift_node_min_tls_version=VersionTLS12 +#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] + +# default storage plugin dependencies to install, by default the ceph and +# glusterfs plugin dependencies will be installed, if available. +#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] + +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_router_selector='region=infra' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. +#openshift_hosted_router_replicas=2 +# +# Router force subdomain (optional) +# A router path format to force on all routes used by this router +# (will ignore the route host value) +#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. 
+#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} +# +# Manage the OpenShift Router +#openshift_hosted_manage_router=true +# +# Router sharding support has been added and can be achieved by supplying the correct +# data to the inventory. The variable to house the data is openshift_hosted_routers +# and is in the form of a list. If no data is passed then a default router will be +# created. There are multiple combinations of router sharding. The one described +# below supports routers on separate nodes. +# +#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}] + +# OpenShift Registry Console Options +# Override the console image prefix for enterprise deployments, not used in origin +# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console" +#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/ +# Override image version, defaults to latest for origin, matches the product version for enterprise +#openshift_cockpit_deployer_version=1.4.1 + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_registry_selector='region=infra' +# +# Registry replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift registry selector. +#openshift_hosted_registry_replicas=2 +# +# Validity of the auto-generated certificate in days (optional) +#openshift_hosted_registry_cert_expire_days=730 +# +# Manage the OpenShift Registry +#openshift_hosted_manage_registry=true + +# Registry Storage Options +# +# NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. 
For example, the volume +# path using these options would be "/exports/registry" +#openshift_hosted_registry_storage_kind=nfs +#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +#openshift_hosted_registry_storage_nfs_directory=/exports +#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +#openshift_hosted_registry_storage_volume_name=registry +#openshift_hosted_registry_storage_volume_size=10Gi +# +# External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/registry" +#openshift_hosted_registry_storage_kind=nfs +#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +#openshift_hosted_registry_storage_host=nfs.example.com +#openshift_hosted_registry_storage_nfs_directory=/exports +#openshift_hosted_registry_storage_volume_name=registry +#openshift_hosted_registry_storage_volume_size=10Gi +# +# Openstack +# Volume must already exist. +#openshift_hosted_registry_storage_kind=openstack +#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] +#openshift_hosted_registry_storage_openstack_filesystem=ext4 +#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 +#openshift_hosted_registry_storage_volume_size=10Gi +# +# AWS S3 +# S3 bucket must already exist. +#openshift_hosted_registry_storage_kind=object +#openshift_hosted_registry_storage_provider=s3 +#openshift_hosted_registry_storage_s3_encrypt=false +#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id +#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id +#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key +#openshift_hosted_registry_storage_s3_bucket=bucket_name +#openshift_hosted_registry_storage_s3_region=bucket_region +#openshift_hosted_registry_storage_s3_chunksize=26214400 +#openshift_hosted_registry_storage_s3_rootdirectory=/registry +#openshift_hosted_registry_pullthrough=true +#openshift_hosted_registry_acceptschema2=true +#openshift_hosted_registry_enforcequota=true +# +# Any S3 service (Minio, ExoScale, ...): Basically the same as above +# but with regionendpoint configured +# S3 bucket must already exist. +#openshift_hosted_registry_storage_kind=object +#openshift_hosted_registry_storage_provider=s3 +#openshift_hosted_registry_storage_s3_accesskey=access_key_id +#openshift_hosted_registry_storage_s3_secretkey=secret_access_key +#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/ +#openshift_hosted_registry_storage_s3_bucket=bucket_name +#openshift_hosted_registry_storage_s3_region=bucket_region +#openshift_hosted_registry_storage_s3_chunksize=26214400 +#openshift_hosted_registry_storage_s3_rootdirectory=/registry +#openshift_hosted_registry_pullthrough=true +#openshift_hosted_registry_acceptschema2=true +#openshift_hosted_registry_enforcequota=true +# +# Additional CloudFront Options. When using CloudFront all three +# of the followingg variables must be defined. 
+#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/ +#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem +#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid + +# Metrics deployment +# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html +# +# By default metrics are not automatically deployed, set this to enable them +#openshift_metrics_install_metrics=true +# +# Storage Options +# If openshift_metrics_storage_kind is unset then metrics will be stored +# in an EmptyDir volume and will be deleted when the cassandra pod terminates. +# Storage options A & B currently support only one cassandra pod which is +# generally enough for up to 1000 pods. Additional volumes can be created +# manually after the fact and metrics scaled per the docs. +# +# Option A - NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. For example, the volume +# path using these options would be "/exports/metrics" +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_nfs_options='*(rw,root_squash)' +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} +# +# Option B - External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/metrics" +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_host=nfs.example.com +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} +# +# Option C - Dynamic -- If openshift supports dynamic volume provisioning for +# your cloud platform use this. +#openshift_metrics_storage_kind=dynamic +# +# Other Metrics Options -- Common items you may wish to reconfigure, for the complete +# list of options please see roles/openshift_metrics/README.md +# +# Override metricsPublicURL in the master config for cluster metrics +# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics +# Currently, you may only alter the hostname portion of the url, alterting the +# `/hawkular/metrics` path will break installation of metrics. +#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics +# Configure the prefix and version for the component images +#openshift_metrics_image_prefix=docker.io/openshift/origin- +#openshift_metrics_image_version=v3.7.0 +# +# StorageClass +# openshift_storageclass_name=gp2 +# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'} +# + +# Logging deployment +# +# Currently logging deployment is disabled by default, enable it by setting this +#openshift_logging_install_logging=true +# +# Logging storage config +# Option A - NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. 
For example, the volume +# path using these options would be "/exports/logging" +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} +# +# Option B - External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/logging" +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} +# +# Option C - Dynamic -- If openshift supports dynamic volume provisioning for +# your cloud platform use this. +#openshift_logging_storage_kind=dynamic +# +# Option D - none -- Logging will use emptydir volumes which are destroyed when +# pods are deleted +# +# Other Logging Options -- Common items you may wish to reconfigure, for the complete +# list of options please see roles/openshift_logging/README.md +# +# Configure loggingPublicURL in the master config for aggregate logging, defaults +# to kibana.{{ openshift_master_default_subdomain }} +#openshift_logging_kibana_hostname=logging.apps.example.com +# Configure the number of elastic search nodes, unless you're using dynamic provisioning +# this value must be 1 +#openshift_logging_es_cluster_size=1 +# Configure the prefix and version for the component images +#openshift_logging_image_prefix=docker.io/openshift/origin- +#openshift_logging_image_version=v3.7.0 + +# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') +# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' + +# Disable the OpenShift SDN plugin +# openshift_use_openshift_sdn=False + +# Configure SDN cluster network and kubernetes service CIDR blocks. These +# network blocks should be private and should not conflict with network blocks +# in your infrastructure that pods may require access to. Can not be changed +# after deployment. +# +# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of +# 172.17.0.0/16. Your installation will fail and/or your configuration change will +# cause the Pod SDN or Cluster SDN to fail. +# +# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting +# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS +# environment variable located in /etc/sysconfig/docker-network. +# When upgrading or scaling up the following must match whats in your master config! +# Inventory: master yaml field +# osm_cluster_network_cidr: clusterNetworkCIDR +# openshift_portal_net: serviceNetworkCIDR +# When installing osm_cluster_network_cidr and openshift_portal_net must be set. +# Sane examples are provided below. +#osm_cluster_network_cidr=10.128.0.0/14 +#openshift_portal_net=172.30.0.0/16 + +# ExternalIPNetworkCIDRs controls what values are acceptable for the +# service external IP field. If empty, no externalIP may be set. It +# may contain a list of CIDRs which are checked for access. 
If a CIDR +# is prefixed with !, IPs in that CIDR will be rejected. Rejections +# will be applied first, then the IP checked against one of the +# allowed CIDRs. You should ensure this range does not overlap with +# your nodes, pods, or service CIDRs for security reasons. +#openshift_master_external_ip_network_cidrs=['0.0.0.0/0'] + +# IngressIPNetworkCIDR controls the range to assign ingress IPs from for +# services of type LoadBalancer on bare metal. If empty, ingress IPs will not +# be assigned. It may contain a single CIDR that will be allocated from. For +# security reasons, you should ensure that this range does not overlap with +# the CIDRs reserved for external IPs, nodes, pods, or services. +#openshift_master_ingress_ip_network_cidr=172.46.0.0/16 + +# Configure number of bits to allocate to each host’s subnet e.g. 9 +# would mean a /23 network on the host. +# When upgrading or scaling up the following must match whats in your master config! +# Inventory: master yaml field +# osm_host_subnet_length: hostSubnetLength +# When installing osm_host_subnet_length must be set. A sane example is provided below. +#osm_host_subnet_length=9 + +# Configure master API and console ports. +#openshift_master_api_port=8443 +#openshift_master_console_port=8443 + +# set RPM version for debugging purposes +#openshift_pkg_version=-1.1 + +# Configure custom ca certificate +#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'} +# +# NOTE: CA certificate will not be replaced with existing clusters. +# This option may only be specified when creating a new cluster or +# when redeploying cluster certificates with the redeploy-certificates +# playbook. + +# Configure custom named certificates (SNI certificates) +# +# https://docs.openshift.org/latest/install_config/certificate_customization.html +# +# NOTE: openshift_master_named_certificates is cached on masters and is an +# additive fact, meaning that each run with a different set of certificates +# will add the newly provided certificates to the cached set of certificates. +# +# An optional CA may be specified for each named certificate. CAs will +# be added to the OpenShift CA bundle which allows for the named +# certificate to be served for internal cluster communication. +# +# If you would like openshift_master_named_certificates to be overwritten with +# the provided value, specify openshift_master_overwrite_named_certificates. +#openshift_master_overwrite_named_certificates=true +# +# Provide local certificate paths which will be deployed to masters +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}] +# +# Detected names may be overridden by specifying the "names" key +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}] + +# Session options +#openshift_master_session_name=ssn +#openshift_master_session_max_seconds=3600 + +# An authentication and encryption secret will be generated if secrets +# are not provided. If provided, openshift_master_session_auth_secrets +# and openshift_master_encryption_secrets must be equal length. +# +# Signing secrets, used to authenticate sessions using +# HMAC. Recommended to use secrets with 32 or 64 bytes. +#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. 
Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] + +# configure how often node iptables rules are refreshed +#openshift_node_iptables_sync_period=5s + +# Configure nodeIP in the node config +# This is needed in cases where node traffic is desired to go over an +# interface other than the default network interface. +#openshift_set_node_ip=True + +# Force setting of system hostname when configuring OpenShift +# This works around issues related to installations that do not have valid dns +# entries for the interfaces attached to the host. +#openshift_set_hostname=True + +# Configure dnsIP in the node config +#openshift_dns_ip=172.30.0.1 + +# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. +#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']} + +# Configure logrotate scripts +# See: https://github.com/nickhammond/ansible-logrotate +#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] + +# openshift-ansible will wait indefinitely for your input when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to true will override that check. +#openshift_override_hostname_check=true + +# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail +# in versions >= 3.6 +#openshift_use_dnsmasq=False + +# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf +# This is useful for POC environments where DNS may not actually be available yet or to set +# options like 'strict-order' to alter dnsmasq configuration. +#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf + +# Global Proxy Configuration +# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment +# variables for docker and master services. +# +# Hosts in the openshift_no_proxy list will NOT use any globally +# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains +# (.example.com), and hosts (example.com), and IP addresses. +#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT +#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT +#openshift_no_proxy='.hosts.example.com,some-host.com' +# +# Most environments don't require a proxy between openshift masters, nodes, and +# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. +# If all of your hosts share a common domain you may wish to disable this and +# specify that domain above instead. +# +# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and +# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy +# variable (above) and set this value to False +#openshift_generate_no_proxy_hosts=True +# +# These options configure the BuildDefaults admission controller which injects +# configuration into Builds. Proxy related values will default to the global proxy +# config values. 
+#
+# These options configure the BuildDefaults admission controller, which injects
+# configuration into Builds. Proxy-related values will default to the global proxy
+# config values. You only need to set these if they differ from the global proxy settings.
+# See BuildDefaults documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_no_proxy=mycorp.com
+#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_no_proxy=mycorp.com
+#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_builddefaults_resources_requests_cpu=100m
+#openshift_builddefaults_resources_requests_memory=256Mi
+#openshift_builddefaults_resources_limits_cpu=1000m
+#openshift_builddefaults_resources_limits_memory=512Mi
+
+# Or you may optionally define your own build defaults configuration serialized as JSON
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
+
+# These options configure the BuildOverrides admission controller, which injects
+# configuration into Builds.
+# See BuildOverrides documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_buildoverrides_force_pull=true
+#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+
+# Or you may optionally define your own build overrides configuration serialized as JSON
+#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
+
+# Enable the template service broker by specifying one or more namespaces whose
+# templates will be served by the broker
+#openshift_template_service_broker_namespaces=['openshift']
+
+# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
+#openshift_master_dynamic_provisioning_enabled=False
+
+# Admission plugin config
+#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
+
+# Configure usage of the openshift_clock role.
+#openshift_clock_enabled=true
+
+# OpenShift Per-Service Environment Variables
+# Environment variables are added to /etc/sysconfig files for
+# each OpenShift service: node, master (api and controllers).
+# API and controllers environment variables are merged in single-master
+# environments.
+#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
+
+# Enable API service auditing, available as of 1.3
+#openshift_master_audit_config={"enabled": true}
+#
+# If you want a more advanced setup for the audit log, use the line
+# below instead. The directory in "auditFilePath" will be created if
+# it does not exist.
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
+
+# Enable origin repos that point at the CentOS PaaS SIG, defaults to true, only used
+# by deployment_type=origin
+#openshift_enable_origin_repo=false
+
+# Validity of the auto-generated OpenShift certificates in days.
+# See also openshift_hosted_registry_cert_expire_days above.
+#
+#openshift_ca_cert_expire_days=1825
+#openshift_node_cert_expire_days=730
+#openshift_master_cert_expire_days=730
+
+# Validity of the auto-generated external etcd certificates in days.
+# Controls validity for etcd CA, peer, server and client certificates.
+#
+#etcd_ca_default_days=1825
+#
+# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
+# openshift_master_saconfig_limitsecretreferences=false
+
+# Upgrade Control
+#
+# By default nodes are upgraded serially, one at a time, and all failures are
+# fatal. There is one set of variables for normal nodes and one for nodes that
+# are part of the control plane, as the number of hosts may differ between
+# those two groups.
+#openshift_upgrade_nodes_serial=1
+#openshift_upgrade_nodes_max_fail_percentage=0
+#openshift_upgrade_control_plane_nodes_serial=1
+#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
+#
+# You can specify the number of nodes to upgrade at once. We do not currently
+# attempt to verify that you have capacity to drain this many nodes at once,
+# so please be careful when specifying these values. You should also verify that
+# the expected number of nodes are all schedulable and ready before starting an
+# upgrade. If it's not possible to drain the requested nodes, the upgrade will
+# stall indefinitely until the drain is successful.
+#
+# If you're upgrading more than one node at a time, you can specify the maximum
+# percentage of failures within the batch before the upgrade is aborted. Any
+# nodes that do fail are ignored for the rest of the playbook run, and you should
+# take care to investigate the failure and return the node to service so that
+# your cluster can return to full strength.
+#
+# The failure percentage must exceed the configured value before the upgrade
+# aborts; this would fail on two failures
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
+# whereas this would not
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
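+#
+# e.g. two failures in a batch of four is 2/4 = 50%: since 50 > 49, the first
+# setting aborts the upgrade, while 50 > 50 is false, so the second continues.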
+#
+# Multiple data migrations take place, and if they fail they will fail the upgrade.
+# You may wish to disable these or make them non-fatal.
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal=true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal=false

# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com

[etcd]
ose3-etcd[1:3]-ansible.test.example.com

# NOTE: Containerized load balancer hosts are not yet supported; if using a global
# containerized=true host variable, it must be set to false for these hosts.
[lb]
ose3-lb-ansible.test.example.com containerized=false

# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
# However, in order to ensure that your masters are not burdened with running pods, you should
# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
[nodes]
ose3-master[1:3]-ansible.test.example.com
ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
index 8a60a30b8..ec6f2c52c 100644
--- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -1,4 +1,13 @@
 ---
+- name: Create persistent volumes
+  hosts: oo_first_master
+  vars:
+    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+  tasks:
+  - debug: var=persistent_volumes
+  - debug: var=persistent_volume_claims
+
 - name: Create Hosted Resources - persistent volumes
   hosts: oo_first_master
   vars:
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ba1d8f29d..33028fea4 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -498,6 +498,20 @@ def set_selectors(facts):
         facts['hosted']['etcd'] = {}
     if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
         facts['hosted']['etcd']['selector'] = None
+    if 'prometheus' not in facts:
+        facts['prometheus'] = {}
+    if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
+        facts['prometheus']['selector'] = None
+    if 'alertmanager' not in facts['prometheus']:
+        facts['prometheus']['alertmanager'] = {}
+    # pylint: disable=line-too-long
+    if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
+        facts['prometheus']['alertmanager']['selector'] = None
+    if 'alertbuffer' not in facts['prometheus']:
+        facts['prometheus']['alertbuffer'] = {}
+    # pylint: disable=line-too-long
+    if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
+        facts['prometheus']['alertbuffer']['selector'] = None
     return facts


@@ -1779,7 +1793,8 @@ class OpenShiftFacts(object):
                       'node',
                       'logging',
                       'loggingops',
-                      'metrics']
+                      'metrics',
+                      'prometheus']

     # Disabling too-many-arguments, this should be cleaned up as a TODO item.
     # pylint: disable=too-many-arguments,no-value-for-parameter
@@ -2068,6 +2083,66 @@ class OpenShiftFacts(object):
             )
         )

+        defaults['prometheus'] = dict(
+            storage=dict(
+                kind=None,
+                volume=dict(
+                    name='prometheus',
+                    size='10Gi'
+                ),
+                nfs=dict(
+                    directory='/exports',
+                    options='*(rw,root_squash)'
+                ),
+                host=None,
+                access=dict(
+                    modes=['ReadWriteOnce']
+                ),
+                create_pv=True,
+                create_pvc=False
+            )
+        )
+
+        defaults['prometheus']['alertmanager'] = dict(
+            storage=dict(
+                kind=None,
+                volume=dict(
+                    name='prometheus-alertmanager',
+                    size='10Gi'
+                ),
+                nfs=dict(
+                    directory='/exports',
+                    options='*(rw,root_squash)'
+                ),
+                host=None,
+                access=dict(
+                    modes=['ReadWriteOnce']
+                ),
+                create_pv=True,
+                create_pvc=False
+            )
+        )
+
+        defaults['prometheus']['alertbuffer'] = dict(
+            storage=dict(
+                kind=None,
+                volume=dict(
+                    name='prometheus-alertbuffer',
+                    size='10Gi'
+                ),
+                nfs=dict(
+                    directory='/exports',
+                    options='*(rw,root_squash)'
+                ),
+                host=None,
+                access=dict(
+                    modes=['ReadWriteOnce']
+                ),
+                create_pv=True,
+                create_pvc=False
+            )
+        )
+
         return defaults

     def guess_host_provider(self):
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
index 47dc9171d..8fc70cecb 100644
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ b/roles/openshift_hosted_facts/tasks/main.yml
@@ -16,4 +16,4 @@
                   | oo_openshift_env }}"
     openshift_env_structures:
     - 'openshift.hosted.router.*'
-  with_items: [hosted, logging, loggingops, metrics]
+  with_items: [hosted, logging, loggingops, metrics, prometheus]
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 5aa8aecec..ab3d4e31e 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -10,50 +10,30 @@ openshift_prometheus_node_selector: {"region":"infra"}
 # images
 openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
 openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
-openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
+openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:v0.9.1"
 openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"

 # additional prometheus rules file
 openshift_prometheus_additional_rules_file: null

-# All the required exports
-openshift_prometheus_pv_exports:
-  - prometheus
-  - prometheus-alertmanager
-  - prometheus-alertbuffer
-# PV template files and their created object names
-openshift_prometheus_pv_data:
-  - pv_name: prometheus
-    pv_template: prom-pv-server.yml
-    pv_label: Prometheus Server PV
-  - pv_name: prometheus-alertmanager
-    pv_template: prom-pv-alertmanager.yml
-    pv_label: Prometheus Alertmanager PV
-  - pv_name: prometheus-alertbuffer
-    pv_template: prom-pv-alertbuffer.yml
-    pv_label: Prometheus Alert Buffer PV
-
-# Hostname/IP of the NFS server. Currently defaults to first master
-openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}"
-
 # storage
 openshift_prometheus_storage_type: pvc
 openshift_prometheus_pvc_name: prometheus
-openshift_prometheus_pvc_size: 10G
+openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
 openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_pvc_pv_selector: {}
+openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default('') }}"

 openshift_prometheus_alertmanager_storage_type: pvc
 openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
-openshift_prometheus_alertmanager_pvc_size: 10G
+openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
 openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertmanager_pvc_pv_selector: {}
+openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default('') }}"

 openshift_prometheus_alertbuffer_storage_type: pvc
 openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
-openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
 openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertbuffer_pvc_pv_selector: {}
+openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default('') }}"

 # container resources
 openshift_prometheus_cpu_limit: null
diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports
deleted file mode 100644
index 3ccedb1fd..000000000
--- a/roles/openshift_prometheus/files/openshift_prometheus.exports
+++ /dev/null
@@ -1,3 +0,0 @@
-/exports/prometheus *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml
deleted file mode 100644
index 4e79da05f..000000000
--- a/roles/openshift_prometheus/tasks/create_pvs.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# Check for existence and then conditionally:
-# - evaluate templates
-# - PVs
-#
-# These tasks idempotently create required Prometheus PV objects. Do not
-# call this file directly. This file is intended to be run as an
-# include that has a 'with_items' attached to it. Hence the use below
-# of variables like "{{ item.pv_label }}"
-
-- name: "Check if the {{ item.pv_label }} template has been created already"
-  oc_obj:
-    namespace: "{{ openshift_prometheus_namespace }}"
-    state: list
-    kind: pv
-    name: "{{ item.pv_name }}"
-  register: prom_pv_check
-
-# Skip all of this if the PV already exists
-- block:
-    - name: "Ensure the {{ item.pv_label }} template is evaluated"
-      template:
-        src: "{{ item.pv_template }}.j2"
-        dest: "{{ tempdir }}/templates/{{ item.pv_template }}"
-
-    - name: "Ensure {{ item.pv_label }} is created"
-      oc_obj:
-        namespace: "{{ openshift_prometheus_namespace }}"
-        kind: pv
-        name: "{{ item.pv_name }}"
-        state: present
-        delete_after: True
-        files:
-          - "{{ tempdir }}/templates/{{ item.pv_template }}"
-  when:
-    - not prom_pv_check.results.results.0
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index a9bce2fb1..cb75eedca 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -54,15 +54,6 @@
     resource_name: cluster-reader
     user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"

-
-######################################################################
-# NFS
-# In the case that we are not running on a cloud provider, volumes must be statically provisioned
-
-- include: nfs.yaml
-  when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
 # create prometheus and alerts services
 # TODO join into 1 task with loop
 - name: Create prometheus service
diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml
deleted file mode 100644
index 0b45f2cee..000000000
--- a/roles/openshift_prometheus/tasks/nfs.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# Tasks to statically provision NFS volumes
-# Include if not using dynamic volume provisioning
-- name: Ensure the /exports/ directory exists
-  file:
-    path: /exports/
-    state: directory
-    mode: 0755
-    owner: root
-    group: root
-
-- name: Ensure the prom-pv0X export directories exist
-  file:
-    path: "/exports/{{ item }}"
-    state: directory
-    mode: 0777
-    owner: nfsnobody
-    group: nfsnobody
-  with_items: "{{ openshift_prometheus_pv_exports }}"
-
-- name: Ensure the NFS exports for Prometheus PVs exist
-  copy:
-    src: openshift_prometheus.exports
-    dest: /etc/exports.d/openshift_prometheus.exports
-  register: nfs_exports_updated
-
-- name: Ensure the NFS export table is refreshed if exports were added
-  command: exportfs -ar
-  when:
-    - nfs_exports_updated.changed
-
-
-######################################################################
-# Create the required Prometheus PVs. Check out these online docs if you
-# need a refresher on includes looping with items:
-# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
-# * http://stackoverflow.com/a/35128533
-#
-# TODO: Handle the case where a PV template is updated in
-# openshift-ansible and the change needs to be landed on the managed
-# cluster.
-
-- include: create_pvs.yaml
-  with_items: "{{ openshift_prometheus_pv_data }}"
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
deleted file mode 100644
index 55a5e19c3..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: prometheus-alertbuffer
-  labels:
-    storage: prometheus-alertbuffer
-spec:
-  capacity:
-    storage: 15Gi
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /exports/prometheus-alertbuffer
-    server: {{ openshift_prometheus_nfs_server }}
-  persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
deleted file mode 100644
index 4ee518735..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: prometheus-alertmanager
-  labels:
-    storage: prometheus-alertmanager
-spec:
-  capacity:
-    storage: 15Gi
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /exports/prometheus-alertmanager
-    server: {{ openshift_prometheus_nfs_server }}
-  persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
deleted file mode 100644
index 933bf0f60..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: prometheus
-  labels:
-    storage: prometheus
-spec:
-  capacity:
-    storage: 15Gi
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /exports/prometheus
-    server: {{ openshift_prometheus_nfs_server }}
-  persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 3047fbaf9..c4e023c1e 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -35,6 +35,9 @@
     - "{{ openshift.logging }}"
     - "{{ openshift.loggingops }}"
     - "{{ openshift.hosted.etcd }}"
+    - "{{ openshift.prometheus }}"
+    - "{{ openshift.prometheus.alertmanager }}"
+    - "{{ openshift.prometheus.alertbuffer }}"

 - name: Configure exports
   template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 0141e0d25..c2a741035 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -3,3 +3,6 @@
 {{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
 {{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
 {{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
+{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
+{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
+{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
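With the openshift_facts defaults introduced above (directory '/exports', options
'*(rw,root_squash)', and volume names prometheus, prometheus-alertmanager, and
prometheus-alertbuffer), the three new template lines render roughly as:

    /exports/prometheus *(rw,root_squash)
    /exports/prometheus-alertmanager *(rw,root_squash)
    /exports/prometheus-alertbuffer *(rw,root_squash)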