21 files changed, 700 insertions, 300 deletions
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 902436302..f0f250480 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -716,6 +716,100 @@ def oo_openshift_env(hostvars):  # pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements +def oo_component_persistent_volumes(hostvars, groups, component): +    """ Generate list of persistent volumes based on oo_openshift_env +        storage options set in host variables for a specific component. +    """ +    if not issubclass(type(hostvars), dict): +        raise errors.AnsibleFilterError("|failed expects hostvars is a dict") +    if not issubclass(type(groups), dict): +        raise errors.AnsibleFilterError("|failed expects groups is a dict") + +    persistent_volume = None + +    if component in hostvars['openshift']: +        if 'storage' in hostvars['openshift'][component]: +            params = hostvars['openshift'][component]['storage'] +            kind = params['kind'] +            create_pv = params['create_pv'] +            if kind is not None and create_pv: +                if kind == 'nfs': +                    host = params['host'] +                    if host is None: +                        if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: +                            host = groups['oo_nfs_to_config'][0] +                        else: +                            raise errors.AnsibleFilterError("|failed no storage host detected") +                    directory = params['nfs']['directory'] +                    volume = params['volume']['name'] +                    path = directory + '/' + volume +                    size = params['volume']['size'] +                    if 'labels' in params: +                        labels = params['labels'] +                    else: +                        labels = dict() +                    access_modes = params['access']['modes'] +                    persistent_volume = dict( +                        name="{0}-volume".format(volume), +                        capacity=size, +                        labels=labels, +                        access_modes=access_modes, +                        storage=dict( +                            nfs=dict( +                                server=host, +                                path=path))) + +                elif kind == 'openstack': +                    volume = params['volume']['name'] +                    size = params['volume']['size'] +                    if 'labels' in params: +                        labels = params['labels'] +                    else: +                        labels = dict() +                    access_modes = params['access']['modes'] +                    filesystem = params['openstack']['filesystem'] +                    volume_id = params['openstack']['volumeID'] +                    persistent_volume = dict( +                        name="{0}-volume".format(volume), +                        capacity=size, +                        labels=labels, +                        access_modes=access_modes, +                        storage=dict( +                            cinder=dict( +                                fsType=filesystem, +                                volumeID=volume_id))) + +                elif kind == 'glusterfs': +                    volume = params['volume']['name'] +                    size = params['volume']['size'] +                    if 'labels' in params: +                        labels = 
params['labels'] +                    else: +                        labels = dict() +                    access_modes = params['access']['modes'] +                    endpoints = params['glusterfs']['endpoints'] +                    path = params['glusterfs']['path'] +                    read_only = params['glusterfs']['readOnly'] +                    persistent_volume = dict( +                        name="{0}-volume".format(volume), +                        capacity=size, +                        labels=labels, +                        access_modes=access_modes, +                        storage=dict( +                            glusterfs=dict( +                                endpoints=endpoints, +                                path=path, +                                readOnly=read_only))) + +                elif not (kind == 'object' or kind == 'dynamic'): +                    msg = "|failed invalid storage kind '{0}' for component '{1}'".format( +                        kind, +                        component) +                    raise errors.AnsibleFilterError(msg) +    return persistent_volume + + +# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements  def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):      """ Generate list of persistent volumes based on oo_openshift_env          storage options set in host variables. @@ -734,84 +828,122 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):              if 'storage' in hostvars['openshift']['hosted'][component]:                  params = hostvars['openshift']['hosted'][component]['storage']                  kind = params['kind'] -                create_pv = params['create_pv'] -                if kind is not None and create_pv: -                    if kind == 'nfs': -                        host = params['host'] -                        if host is None: -                            if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: -                                host = groups['oo_nfs_to_config'][0] +                if 'create_pv' in params: +                    create_pv = params['create_pv'] +                    if kind is not None and create_pv: +                        if kind == 'nfs': +                            host = params['host'] +                            if host is None: +                                if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0: +                                    host = groups['oo_nfs_to_config'][0] +                                else: +                                    raise errors.AnsibleFilterError("|failed no storage host detected") +                            directory = params['nfs']['directory'] +                            volume = params['volume']['name'] +                            path = directory + '/' + volume +                            size = params['volume']['size'] +                            if 'labels' in params: +                                labels = params['labels']                              else: -                                raise errors.AnsibleFilterError("|failed no storage host detected") -                        directory = params['nfs']['directory'] -                        volume = params['volume']['name'] -                        path = directory + '/' + volume -                        size = params['volume']['size'] -                        if 'labels' in params: -                            labels = params['labels'] -           
             else: -                            labels = dict() -                        access_modes = params['access']['modes'] -                        persistent_volume = dict( -                            name="{0}-volume".format(volume), -                            capacity=size, -                            labels=labels, -                            access_modes=access_modes, -                            storage=dict( -                                nfs=dict( -                                    server=host, -                                    path=path))) -                        persistent_volumes.append(persistent_volume) -                    elif kind == 'openstack': -                        volume = params['volume']['name'] -                        size = params['volume']['size'] -                        if 'labels' in params: -                            labels = params['labels'] -                        else: -                            labels = dict() -                        access_modes = params['access']['modes'] -                        filesystem = params['openstack']['filesystem'] -                        volume_id = params['openstack']['volumeID'] -                        persistent_volume = dict( -                            name="{0}-volume".format(volume), -                            capacity=size, -                            labels=labels, -                            access_modes=access_modes, -                            storage=dict( -                                cinder=dict( -                                    fsType=filesystem, -                                    volumeID=volume_id))) -                        persistent_volumes.append(persistent_volume) -                    elif kind == 'glusterfs': -                        volume = params['volume']['name'] -                        size = params['volume']['size'] -                        if 'labels' in params: -                            labels = params['labels'] -                        else: -                            labels = dict() -                        access_modes = params['access']['modes'] -                        endpoints = params['glusterfs']['endpoints'] -                        path = params['glusterfs']['path'] -                        read_only = params['glusterfs']['readOnly'] -                        persistent_volume = dict( -                            name="{0}-volume".format(volume), -                            capacity=size, -                            labels=labels, -                            access_modes=access_modes, -                            storage=dict( -                                glusterfs=dict( -                                    endpoints=endpoints, -                                    path=path, -                                    readOnly=read_only))) -                        persistent_volumes.append(persistent_volume) -                    elif not (kind == 'object' or kind == 'dynamic'): -                        msg = "|failed invalid storage kind '{0}' for component '{1}'".format( -                            kind, -                            component) -                        raise errors.AnsibleFilterError(msg) +                                labels = dict() +                            access_modes = params['access']['modes'] +                            persistent_volume = dict( +                                name="{0}-volume".format(volume), +                                capacity=size, +                                
labels=labels, +                                access_modes=access_modes, +                                storage=dict( +                                    nfs=dict( +                                        server=host, +                                        path=path))) +                            persistent_volumes.append(persistent_volume) +                        elif kind == 'openstack': +                            volume = params['volume']['name'] +                            size = params['volume']['size'] +                            if 'labels' in params: +                                labels = params['labels'] +                            else: +                                labels = dict() +                            access_modes = params['access']['modes'] +                            filesystem = params['openstack']['filesystem'] +                            volume_id = params['openstack']['volumeID'] +                            persistent_volume = dict( +                                name="{0}-volume".format(volume), +                                capacity=size, +                                labels=labels, +                                access_modes=access_modes, +                                storage=dict( +                                    cinder=dict( +                                        fsType=filesystem, +                                        volumeID=volume_id))) +                            persistent_volumes.append(persistent_volume) +                        elif kind == 'glusterfs': +                            volume = params['volume']['name'] +                            size = params['volume']['size'] +                            if 'labels' in params: +                                labels = params['labels'] +                            else: +                                labels = dict() +                            access_modes = params['access']['modes'] +                            endpoints = params['glusterfs']['endpoints'] +                            path = params['glusterfs']['path'] +                            read_only = params['glusterfs']['readOnly'] +                            persistent_volume = dict( +                                name="{0}-volume".format(volume), +                                capacity=size, +                                labels=labels, +                                access_modes=access_modes, +                                storage=dict( +                                    glusterfs=dict( +                                        endpoints=endpoints, +                                        path=path, +                                        readOnly=read_only))) +                            persistent_volumes.append(persistent_volume) +                        elif not (kind == 'object' or kind == 'dynamic'): +                            msg = "|failed invalid storage kind '{0}' for component '{1}'".format( +                                kind, +                                component) +                            raise errors.AnsibleFilterError(msg) +    if 'logging' in hostvars['openshift']: +        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging') +        if persistent_volume is not None: +            persistent_volumes.append(persistent_volume) +    if 'loggingops' in hostvars['openshift']: +        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops') +        if persistent_volume is not None: +            
persistent_volumes.append(persistent_volume) +    if 'metrics' in hostvars['openshift']: +        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics') +        if persistent_volume is not None: +            persistent_volumes.append(persistent_volume)      return persistent_volumes +def oo_component_pv_claims(hostvars, component): +    """ Generate list of persistent volume claims based on oo_openshift_env +        storage options set in host variables for a specific component. +    """ +    if not issubclass(type(hostvars), dict): +        raise errors.AnsibleFilterError("|failed expects hostvars is a dict") + +    if component in hostvars['openshift']: +        if 'storage' in hostvars['openshift'][component]: +            params = hostvars['openshift'][component]['storage'] +            kind = params['kind'] +            create_pv = params['create_pv'] +            create_pvc = params['create_pvc'] +            if kind not in [None, 'object'] and create_pv and create_pvc: +                volume = params['volume']['name'] +                size = params['volume']['size'] +                access_modes = params['access']['modes'] +                persistent_volume_claim = dict( +                    name="{0}-claim".format(volume), +                    capacity=size, +                    access_modes=access_modes) +                return persistent_volume_claim +    return None + +  def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):      """ Generate list of persistent volume claims based on oo_openshift_env          storage options set in host variables. @@ -828,17 +960,31 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):              if 'storage' in hostvars['openshift']['hosted'][component]:                  params = hostvars['openshift']['hosted'][component]['storage']                  kind = params['kind'] -                create_pv = params['create_pv'] -                create_pvc = params['create_pvc'] -                if kind not in [None, 'object'] and create_pv and create_pvc: -                    volume = params['volume']['name'] -                    size = params['volume']['size'] -                    access_modes = params['access']['modes'] -                    persistent_volume_claim = dict( -                        name="{0}-claim".format(volume), -                        capacity=size, -                        access_modes=access_modes) -                    persistent_volume_claims.append(persistent_volume_claim) +                if 'create_pv' in params: +                    if 'create_pvc' in params: +                        create_pv = params['create_pv'] +                        create_pvc = params['create_pvc'] +                        if kind not in [None, 'object'] and create_pv and create_pvc: +                            volume = params['volume']['name'] +                            size = params['volume']['size'] +                            access_modes = params['access']['modes'] +                            persistent_volume_claim = dict( +                                name="{0}-claim".format(volume), +                                capacity=size, +                                access_modes=access_modes) +                            persistent_volume_claims.append(persistent_volume_claim) +    if 'logging' in hostvars['openshift']: +        persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging') +        if persistent_volume_claim is not None: +            
persistent_volume_claims.append(persistent_volume_claim) +    if 'loggingops' in hostvars['openshift']: +        persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops') +        if persistent_volume_claim is not None: +            persistent_volume_claims.append(persistent_volume_claim) +    if 'metrics' in hostvars['openshift']: +        persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics') +        if persistent_volume_claim is not None: +            persistent_volume_claims.append(persistent_volume_claim)      return persistent_volume_claims diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 486fe56a0..38bc66ad1 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -491,10 +491,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html  #  # By default metrics are not automatically deployed, set this to enable them -# openshift_hosted_metrics_deploy=true +#openshift_metrics_install_metrics=true  #  # Storage Options -# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored +# If openshift_metrics_storage_kind is unset then metrics will be stored  # in an EmptyDir volume and will be deleted when the cassandra pod terminates.  # Storage options A & B currently support only one cassandra pod which is  # generally enough for up to 1000 pods. Additional volumes can be created @@ -504,29 +504,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # An NFS volume will be created with path "nfs_directory/volume_name"  # on the host within the [nfs] host group.  For example, the volume  # path using these options would be "/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_nfs_options='*(rw,root_squash)' +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'}  #  # Option B - External NFS Host  # NFS volume must already exist with path "nfs_directory/_volume_name" on  # the storage_host. 
For example, the remote volume path using these  # options would be "nfs.example.com:/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_host=nfs.example.com -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_host=nfs.example.com +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'}  #  # Option C - Dynamic -- If openshift supports dynamic volume provisioning for  # your cloud platform use this. -#openshift_hosted_metrics_storage_kind=dynamic +#openshift_metrics_storage_kind=dynamic  #  # Other Metrics Options -- Common items you may wish to reconfigure, for the complete  # list of options please see roles/openshift_metrics/README.md @@ -535,10 +535,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics  # Currently, you may only alter the hostname portion of the url, alterting the  # `/hawkular/metrics` path will break installation of metrics. -#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics  # Configure the prefix and version for the component images -#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin- -#openshift_hosted_metrics_deployer_version=v3.7.0 +#openshift_metrics_image_prefix=docker.io/openshift/origin- +#openshift_metrics_image_version=v3.7.0  #  # StorageClass  # openshift_storageclass_name=gp2 @@ -548,36 +548,36 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # Logging deployment  #  # Currently logging deployment is disabled by default, enable it by setting this -#openshift_hosted_logging_deploy=true +#openshift_logging_install_logging=true  #  # Logging storage config  # Option A - NFS Host Group  # An NFS volume will be created with path "nfs_directory/volume_name"  # on the host within the [nfs] host group.  For example, the volume  # path using these options would be "/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'}  #  # Option B - External NFS Host  # NFS volume must already exist with path "nfs_directory/_volume_name" on  # the storage_host. 
For example, the remote volume path using these  # options would be "nfs.example.com:/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_host=nfs.example.com -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'}  #  # Option C - Dynamic -- If openshift supports dynamic volume provisioning for  # your cloud platform use this. -#openshift_hosted_logging_storage_kind=dynamic +#openshift_logging_storage_kind=dynamic  #  # Option D - none -- Logging will use emptydir volumes which are destroyed when  # pods are deleted @@ -587,13 +587,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  #  # Configure loggingPublicURL in the master config for aggregate logging, defaults  # to kibana.{{ openshift_master_default_subdomain }} -#openshift_hosted_logging_hostname=logging.apps.example.com +#openshift_logging_kibana_hostname=logging.apps.example.com  # Configure the number of elastic search nodes, unless you're using dynamic provisioning  # this value must be 1 -#openshift_hosted_logging_elasticsearch_cluster_size=1 +#openshift_logging_es_cluster_size=1  # Configure the prefix and version for the component images -#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin- -#openshift_hosted_logging_deployer_version=v3.7.0 +#openshift_logging_image_prefix=docker.io/openshift/origin- +#openshift_logging_image_version=v3.7.0  # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')  # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 92a0927e5..8a742f8be 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -499,10 +499,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html  #  # By default metrics are not automatically deployed, set this to enable them -# openshift_hosted_metrics_deploy=true +#openshift_metrics_install_metrics=true  #  # Storage Options -# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored +# If openshift_metrics_storage_kind is unset then metrics will be stored  # in an EmptyDir volume and will be deleted when the cassandra pod terminates.  # Storage options A & B currently support only one cassandra pod which is  # generally enough for up to 1000 pods. Additional volumes can be created @@ -512,29 +512,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # An NFS volume will be created with path "nfs_directory/volume_name"  # on the host within the [nfs] host group.  
For example, the volume  # path using these options would be "/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_nfs_options='*(rw,root_squash)' +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'}  #  # Option B - External NFS Host  # NFS volume must already exist with path "nfs_directory/_volume_name" on  # the storage_host. For example, the remote volume path using these  # options would be "nfs.example.com:/exports/metrics" -#openshift_hosted_metrics_storage_kind=nfs -#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_metrics_storage_host=nfs.example.com -#openshift_hosted_metrics_storage_nfs_directory=/exports -#openshift_hosted_metrics_storage_volume_name=metrics -#openshift_hosted_metrics_storage_volume_size=10Gi -#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_host=nfs.example.com +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'}  #  # Option C - Dynamic -- If openshift supports dynamic volume provisioning for  # your cloud platform use this. -#openshift_hosted_metrics_storage_kind=dynamic +#openshift_metrics_storage_kind=dynamic  #  # Other Metrics Options -- Common items you may wish to reconfigure, for the complete  # list of options please see roles/openshift_metrics/README.md @@ -543,10 +543,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics  # Currently, you may only alter the hostname portion of the url, alterting the  # `/hawkular/metrics` path will break installation of metrics. -#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics +#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics  # Configure the prefix and version for the component images -#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/ -#openshift_hosted_metrics_deployer_version=3.7.0 +#openshift_metrics_image_prefix=registry.example.com:8888/openshift3/ +#openshift_metrics_image_version=3.7.0  #  # StorageClass  # openshift_storageclass_name=gp2 @@ -556,36 +556,36 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # Logging deployment  #  # Currently logging deployment is disabled by default, enable it by setting this -#openshift_hosted_logging_deploy=true +#openshift_logging_install_logging=true  #  # Logging storage config  # Option A - NFS Host Group  # An NFS volume will be created with path "nfs_directory/volume_name"  # on the host within the [nfs] host group.  
For example, the volume  # path using these options would be "/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'}  #  # Option B - External NFS Host  # NFS volume must already exist with path "nfs_directory/_volume_name" on  # the storage_host. For example, the remote volume path using these  # options would be "nfs.example.com:/exports/logging" -#openshift_hosted_logging_storage_kind=nfs -#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_logging_storage_host=nfs.example.com -#openshift_hosted_logging_storage_nfs_directory=/exports -#openshift_hosted_logging_storage_volume_name=logging -#openshift_hosted_logging_storage_volume_size=10Gi -#openshift_hosted_logging_storage_labels={'storage': 'logging'} +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'}  #  # Option C - Dynamic -- If openshift supports dynamic volume provisioning for  # your cloud platform use this. 
-#openshift_hosted_logging_storage_kind=dynamic +#openshift_logging_storage_kind=dynamic  #  # Option D - none -- Logging will use emptydir volumes which are destroyed when  # pods are deleted @@ -595,13 +595,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  #  # Configure loggingPublicURL in the master config for aggregate logging, defaults  # to kibana.{{ openshift_master_default_subdomain }} -#openshift_hosted_logging_hostname=logging.apps.example.com +#openshift_logging_kibana_hostname=logging.apps.example.com  # Configure the number of elastic search nodes, unless you're using dynamic provisioning  # this value must be 1 -#openshift_hosted_logging_elasticsearch_cluster_size=1 +#openshift_logging_es_cluster_size=1  # Configure the prefix and version for the component images -#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/ -#openshift_hosted_logging_deployer_version=3.7.0 +#openshift_logging_image_prefix=registry.example.com:8888/openshift3/ +#openshift_logging_image_version=3.7.0  # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')  # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index bbd5a0185..c97b3be4b 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -62,3 +62,11 @@    - openshift_enable_service_catalog | default(false) | bool    tags:    - servicecatalog + +- name: Print deprecated variable warning message if necessary +  hosts: oo_first_master +  gather_facts: no +  tasks: +  - debug: msg="{{__deprecation_message}}" +    when: +    - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 75339f6df..0e970f376 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -19,31 +19,15 @@        openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"        openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"      when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" -  - set_fact: -      logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" -      logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" -      logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}" -      logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" -      logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" +    roles:    - role: openshift_default_storage_class      when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')    - role: openshift_hosted    - role: openshift_metrics -    when: openshift_hosted_metrics_deploy | default(false) | bool +    when: openshift_metrics_install_metrics | default(false) | bool    - role: openshift_logging -    when: openshift_hosted_logging_deploy | default(false) | bool -    openshift_hosted_logging_hostname: "{{ logging_hostname }}" -    openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}" -    openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}" -    openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}" -    openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" -    openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else ''  }}" -    openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" -    openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}" -    openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_loggingops_storage_kind | default(none) == 'dynamic' else '' }}" -    openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else ''  }}" -    openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_hosted_loggingops_storage_kind | default(none) =='dynamic' else '' }}" +    when: openshift_logging_install_logging | default(false) | bool    - role: cockpit-ui      when: ( openshift.common.version_gte_3_3_or_1_3  | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) @@ -57,8 +41,6 @@    - hosted    pre_tasks:    - set_fact: -      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" -  - set_fact:        openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"    tasks: @@ -66,10 +48,10 @@      - include_role:          name: openshift_logging          tasks_from: update_master_config -    when: openshift_hosted_logging_deploy | default(false) | bool +    when: openshift_logging_install_logging | default(false) | bool    - block:      - include_role:          name: openshift_metrics          tasks_from: update_master_config -    when: openshift_hosted_metrics_deploy | default(false) | bool +    when: openshift_metrics_install_metrics | default(false) | bool diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index a76751e81..b6e7507ff 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -493,10 +493,10 @@ def set_selectors(facts):          facts['hosted']['metrics'] = {}      if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:          facts['hosted']['metrics']['selector'] = None -    if 'logging' not in facts['hosted']: -        facts['hosted']['logging'] = {} -    if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']: -        facts['hosted']['logging']['selector'] = None +    if 'logging' not in facts: +        facts['logging'] = {} +    if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']: +        facts['logging']['selector'] = None      if 'etcd' not in facts['hosted']:          facts['hosted']['etcd'] = {}      if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']: @@ -1785,7 +1785,10 @@ class OpenShiftFacts(object):                     'etcd',                     'hosted',                     'master', -                   'node'] +                   'node', +                   'logging', +                   'loggingops', +                   'metrics']      # Disabling too-many-arguments, this should be cleaned up as a TODO item.      
# pylint: disable=too-many-arguments,no-value-for-parameter @@ -1966,66 +1969,6 @@ class OpenShiftFacts(object):          if 'hosted' in roles or self.role == 'hosted':              defaults['hosted'] = dict( -                metrics=dict( -                    deploy=False, -                    duration=7, -                    resolution='10s', -                    storage=dict( -                        kind=None, -                        volume=dict( -                            name='metrics', -                            size='10Gi' -                        ), -                        nfs=dict( -                            directory='/exports', -                            options='*(rw,root_squash)' -                        ), -                        host=None, -                        access=dict( -                            modes=['ReadWriteOnce'] -                        ), -                        create_pv=True, -                        create_pvc=False -                    ) -                ), -                loggingops=dict( -                    storage=dict( -                        kind=None, -                        volume=dict( -                            name='logging-es-ops', -                            size='10Gi' -                        ), -                        nfs=dict( -                            directory='/exports', -                            options='*(rw,root_squash)' -                        ), -                        host=None, -                        access=dict( -                            modes=['ReadWriteOnce'] -                        ), -                        create_pv=True, -                        create_pvc=False -                    ) -                ), -                logging=dict( -                    storage=dict( -                        kind=None, -                        volume=dict( -                            name='logging-es', -                            size='10Gi' -                        ), -                        nfs=dict( -                            directory='/exports', -                            options='*(rw,root_squash)' -                        ), -                        host=None, -                        access=dict( -                            modes=['ReadWriteOnce'] -                        ), -                        create_pv=True, -                        create_pvc=False -                    ) -                ),                  etcd=dict(                      storage=dict(                          kind=None, @@ -2072,6 +2015,69 @@ class OpenShiftFacts(object):                  router=dict()              ) +            defaults['logging'] = dict( +                storage=dict( +                    kind=None, +                    volume=dict( +                        name='logging-es', +                        size='10Gi' +                    ), +                    nfs=dict( +                        directory='/exports', +                        options='*(rw,root_squash)' +                    ), +                    host=None, +                    access=dict( +                        modes=['ReadWriteOnce'] +                    ), +                    create_pv=True, +                    create_pvc=False +                ) +            ) + +            defaults['loggingops'] = dict( +                storage=dict( +                    kind=None, +                    volume=dict( +                        name='logging-es-ops', +                        size='10Gi' +                    ), +           
         nfs=dict( +                        directory='/exports', +                        options='*(rw,root_squash)' +                    ), +                    host=None, +                    access=dict( +                        modes=['ReadWriteOnce'] +                    ), +                    create_pv=True, +                    create_pvc=False +                ) +            ) + +            defaults['metrics'] = dict( +                deploy=False, +                duration=7, +                resolution='10s', +                storage=dict( +                    kind=None, +                    volume=dict( +                        name='metrics', +                        size='10Gi' +                    ), +                    nfs=dict( +                        directory='/exports', +                        options='*(rw,root_squash)' +                    ), +                    host=None, +                    access=dict( +                        modes=['ReadWriteOnce'] +                    ), +                    create_pv=True, +                    create_pvc=False +                ) +            ) +          return defaults      def guess_host_provider(self): diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 631bf3e2a..53d1a8bc7 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -8,9 +8,10 @@  - name: Set hosted facts    openshift_facts: -    role: hosted +    role: "{{ item }}"      openshift_env: "{{ hostvars                         | oo_merge_hostvars(vars, inventory_hostname)                         | oo_openshift_env }}"      openshift_env_structures:      - 'openshift.hosted.router.*' +  with_items: [hosted, logging, loggingops, metrics] diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 716f0e002..06bbbc60e 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -1,15 +1,16 @@  --- -openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}" +openshift_logging_use_ops: False  openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" -openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" +openshift_logging_master_public_url: "{{ 'https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true)) }}"  openshift_logging_namespace: logging  openshift_logging_nodeselector: null  openshift_logging_labels: {}  openshift_logging_label_key: ""  openshift_logging_label_value: ""  openshift_logging_install_logging: True +  openshift_logging_purge_logging: False -openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_image_pull_secret: ""  openshift_logging_curator_default_days: 30  openshift_logging_curator_run_hour: 0 @@ -19,13 +20,13 @@ openshift_logging_curator_script_log_level: INFO  openshift_logging_curator_log_level: ERROR  openshift_logging_curator_cpu_limit: 100m  openshift_logging_curator_memory_limit: null -openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}" +openshift_logging_curator_nodeselector: {}  openshift_logging_curator_ops_cpu_limit: 
100m  openshift_logging_curator_ops_memory_limit: null -openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_curator_ops_nodeselector: {} -openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"  openshift_logging_kibana_cpu_limit: null  openshift_logging_kibana_memory_limit: 736Mi  openshift_logging_kibana_proxy_debug: false @@ -34,8 +35,8 @@ openshift_logging_kibana_proxy_memory_limit: 96Mi  openshift_logging_kibana_replica_count: 1  openshift_logging_kibana_edge_term_policy: Redirect -openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}" -openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_kibana_nodeselector: {} +openshift_logging_kibana_ops_nodeselector: {}  #The absolute path on the control node to the cert file to use  #for the public facing kibana certs @@ -49,7 +50,7 @@ openshift_logging_kibana_key: ""  #for the public facing kibana certs  openshift_logging_kibana_ca: "" -openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" +openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"  openshift_logging_kibana_ops_cpu_limit: null  openshift_logging_kibana_ops_memory_limit: 736Mi  openshift_logging_kibana_ops_proxy_debug: false @@ -69,12 +70,12 @@ openshift_logging_kibana_ops_key: ""  #for the public facing ops kibana certs  openshift_logging_kibana_ops_ca: "" -openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" +openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}  openshift_logging_fluentd_cpu_limit: 100m  openshift_logging_fluentd_memory_limit: 512Mi  openshift_logging_fluentd_es_copy: false -openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" -openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" +openshift_logging_fluentd_journal_source: "" +openshift_logging_fluentd_journal_read_from_head: ""  openshift_logging_fluentd_hosts: ['--all']  openshift_logging_fluentd_buffer_queue_limit: 1024  openshift_logging_fluentd_buffer_size_limit: 1m @@ -84,18 +85,18 @@ openshift_logging_es_port: 9200  openshift_logging_es_ca: /etc/fluent/keys/ca  openshift_logging_es_client_cert: /etc/fluent/keys/cert  openshift_logging_es_client_key: /etc/fluent/keys/key -openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" +openshift_logging_es_cluster_size: 1  openshift_logging_es_cpu_limit: 1000m  # the logging appenders for the root loggers to write ES logs. 
Valid values: 'file', 'console'  openshift_logging_es_log_appenders: ['file'] -openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}" -openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}" -openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}" -openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}" -openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" +openshift_logging_es_memory_limit: "8Gi" +openshift_logging_es_pv_selector: "{{ openshift_logging_storage_labels | default('') }}" +openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynamic | default(False) }}" +openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}" +openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}"  openshift_logging_es_recover_after_time: 5m -openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}" -openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}" +openshift_logging_es_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_nodeselector: {}  # openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml  openshift_logging_es_config: {}  openshift_logging_es_number_of_shards: 1 @@ -125,16 +126,16 @@ openshift_logging_es_ops_port: 9200  openshift_logging_es_ops_ca: /etc/fluent/keys/ca  openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert  openshift_logging_es_ops_client_key: /etc/fluent/keys/key -openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" +openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}"  openshift_logging_es_ops_cpu_limit: 1000m -openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}" -openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}" -openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" -openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}" -openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" +openshift_logging_es_ops_memory_limit: "8Gi" +openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}" +openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" +openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}" +openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"  openshift_logging_es_ops_recover_after_time: 5m -openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}" -openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}" 
+openshift_logging_es_ops_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}" +openshift_logging_es_ops_nodeselector: {}  # for exposing es-ops to external (outside of the cluster) clients  openshift_logging_es_ops_allow_external: False @@ -153,7 +154,7 @@ openshift_logging_es_ops_key: ""  openshift_logging_es_ops_ca_ext: ""  # storage related defaults -openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}" +openshift_logging_storage_access_modes: ['ReadWriteOnce']  # mux - secure_forward listener service  openshift_logging_mux_allow_external: False diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index d9a17ae7f..3ebc90ce2 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -14,9 +14,9 @@ openshift_metrics_hawkular_ca: ""  openshift_metrics_hawkular_nodeselector: ""  openshift_metrics_cassandra_replicas: 1 -openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}" -openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}" -openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}" +openshift_metrics_cassandra_storage_type: "{{ openshift_metrics_storage_kind | default('emptydir') }}" +openshift_metrics_cassandra_pvc_size: "{{ openshift_metrics_storage_volume_size | default('10Gi') }}" +openshift_metrics_cassandra_pv_selector: "{{ openshift_metrics_storage_labels | default('') }}"  openshift_metrics_cassandra_limits_memory: 2G  openshift_metrics_cassandra_limits_cpu: null  openshift_metrics_cassandra_requests_memory: 1G @@ -53,8 +53,8 @@ openshift_metrics_master_url: https://kubernetes.default.svc  openshift_metrics_node_id: nodename  openshift_metrics_project: openshift-infra -openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}" -openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" +openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}" +openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"  openshift_metrics_hawkular_user_write_access: False diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml index 678c4104c..8704ddfa0 100644 --- a/roles/openshift_metrics/vars/default_images.yml +++ b/roles/openshift_metrics/vars/default_images.yml @@ -1,3 +1,3 @@  --- -__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('docker.io/openshift/origin-') }}" -__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default('latest') }}" +__openshift_metrics_image_prefix: "docker.io/openshift/origin-" +__openshift_metrics_image_version: "latest" diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml index f0bdac7d2..68cdf06fe 100644 --- a/roles/openshift_metrics/vars/openshift-enterprise.yml +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -1,3 +1,3 @@  --- -__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" 
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index f0bdac7d2..68cdf06fe 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
 ---
-__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('v3.6') }}"
+__openshift_metrics_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_metrics_image_version: "v3.6"
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 8d3d010e4..19e9a56b7 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -9,5 +9,4 @@ galaxy_info:
   - name: EL
     versions:
     - 7
-dependencies:
-- role: openshift_hosted_facts
+dependencies: {}
diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py
new file mode 100644
index 000000000..d42c9bdb9
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_logging.py
@@ -0,0 +1,25 @@
+'''
+ Openshift Logging class that provides useful filters used in Logging.
+
+ This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
+'''
+
+
+def map_from_pairs(source, delim="="):
+    ''' Returns a dict parsed from a delim-delimited source string '''
+    if source == '':
+        return dict()
+
+    return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+    ''' OpenShift Logging Filters '''
+
+    # pylint: disable=no-self-use, too-few-public-methods
+    def filters(self):
+        ''' Returns the names of the filters provided by this class '''
+        return {
+            'map_from_pairs': map_from_pairs
+        }
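A short usage sketch for the map_from_pairs filter defined above, assuming the function is in scope (for example, pasted into a Python shell); the selector strings are illustrative:

# Splits "key=value" pairs on commas and the given delimiter; empty input yields {}.
print(map_from_pairs("region=infra,zone=default"))  # -> {'region': 'infra', 'zone': 'default'}
print(map_from_pairs(""))                           # -> {}
print(map_from_pairs("region:infra", delim=":"))    # -> {'region': 'infra'}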
diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
new file mode 100644
index 000000000..f61801714
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+
+""" Ansible module to help with setting facts conditionally based on other facts """
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: conditional_set_fact
+
+short_description: This will set a fact if the value is defined
+
+description:
+    - "To avoid constant set_fact & when conditions for each var we can use this"
+
+author:
+    - Eric Wolinetz ewolinet@redhat.com
+'''
+
+
+EXAMPLES = '''
+- name: Conditionally set fact
+  conditional_set_fact:
+    fact1: not_defined_variable
+
+- name: Conditionally set fact
+  conditional_set_fact:
+    fact1: not_defined_variable
+    fact2: defined_variable
+
+'''
+
+
+def run_module():
+    """ The body of the module: we check whether the variable named as the value
+        for each key is defined. If it is, we use its value for the original key """
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            facts=dict(type='dict', required=True),
+            vars=dict(required=False, type='dict', default={})
+        ),
+        supports_check_mode=True
+    )
+
+    local_facts = dict()
+    is_changed = False
+
+    for param in module.params['vars']:
+        other_var = module.params['vars'][param]
+
+        if other_var in module.params['facts']:
+            local_facts[param] = module.params['facts'][other_var]
+            if not is_changed:
+                is_changed = True
+
+    return module.exit_json(changed=is_changed,  # noqa: F405
+                            ansible_facts=local_facts)
+
+
+def main():
+    """ main """
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
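A standalone sketch of run_module()'s loop outside Ansible: the deprecated/new variable names are taken from the mapping tasks later in this patch, while the sample hostvars content is illustrative:

# Copy a fact under its new name only when the deprecated variable it maps
# from exists in the supplied facts dict.
facts = {'openshift_hosted_logging_deploy': True}  # illustrative hostvars snapshot
mapping = {
    'openshift_logging_install_logging': 'openshift_hosted_logging_deploy',
    'openshift_logging_image_version': 'openshift_hosted_logging_deployer_version',
}

local_facts = {}
for new_name, old_name in mapping.items():
    if old_name in facts:
        local_facts[new_name] = facts[old_name]

print(local_facts)  # {'openshift_logging_install_logging': True} - the unset variable is skipped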
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
new file mode 100644
index 000000000..e52ab5f6d
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
@@ -0,0 +1,48 @@
+---
+# this is used to set the logging variables from deprecated values to the current variable names
+# this file should be deleted once variables are no longer honored
+
+- conditional_set_fact:
+    facts: "{{ hostvars[inventory_hostname] }}"
+    vars:
+      logging_hostname: openshift_hosted_logging_hostname
+      logging_ops_hostname: openshift_hosted_logging_ops_hostname
+      logging_elasticsearch_cluster_size: openshift_hosted_logging_elasticsearch_cluster_size
+      logging_elasticsearch_ops_cluster_size: openshift_hosted_logging_elasticsearch_ops_cluster_size
+      openshift_logging_storage_kind: openshift_hosted_logging_storage_kind
+      openshift_logging_storage_host: openshift_hosted_logging_storage_host
+      openshift_logging_storage_labels: openshift_hosted_logging_storage_labels
+      openshift_logging_storage_volume_size: openshift_hosted_logging_storage_volume_size
+      openshift_loggingops_storage_kind: openshift_hosted_loggingops_storage_kind
+      openshift_loggingops_storage_host: openshift_hosted_loggingops_storage_host
+      openshift_loggingops_storage_labels: openshift_hosted_loggingops_storage_labels
+      openshift_loggingops_storage_volume_size: openshift_hosted_loggingops_storage_volume_size
+      openshift_logging_use_ops: openshift_hosted_logging_enable_ops_cluster
+      openshift_logging_image_pull_secret: openshift_hosted_logging_image_pull_secret
+      openshift_logging_kibana_hostname: openshift_hosted_logging_hostname
+      openshift_logging_kibana_ops_hostname: openshift_hosted_logging_ops_hostname
+      openshift_logging_fluentd_journal_source: openshift_hosted_logging_journal_source
+      openshift_logging_fluentd_journal_read_from_head: openshift_hosted_logging_journal_read_from_head
+      openshift_logging_es_memory_limit: openshift_hosted_logging_elasticsearch_instance_ram
+      openshift_logging_es_nodeselector: openshift_hosted_logging_elasticsearch_nodeselector
+      openshift_logging_es_ops_memory_limit: openshift_hosted_logging_elasticsearch_ops_instance_ram
+      openshift_logging_storage_access_modes: openshift_hosted_logging_storage_access_modes
+      openshift_logging_master_public_url: openshift_hosted_logging_master_public_url
+      openshift_logging_image_prefix: openshift_hosted_logging_deployer_prefix
+      openshift_logging_image_version: openshift_hosted_logging_deployer_version
+      openshift_logging_install_logging: openshift_hosted_logging_deploy
+
+
+- set_fact:
+    openshift_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+    openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_storage_volume_size if openshift_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+    openshift_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+    openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
+    openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+    openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
+    openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
+    openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
+    openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
+    openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
+    openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+    openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml
new file mode 100644
index 000000000..279646981
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml
@@ -0,0 +1,17 @@
+---
+# this is used to set the metrics variables from deprecated values to the current variable names
+# this file should be deleted once variables are no longer honored
+
+- conditional_set_fact:
+    facts: "{{ hostvars[inventory_hostname] }}"
+    vars:
+      openshift_metrics_storage_access_modes: openshift_hosted_metrics_storage_access_modes
+      openshift_metrics_storage_host: openshift_hosted_metrics_storage_host
+      openshift_metrics_storage_nfs_directory: openshift_hosted_metrics_storage_nfs_directory
+      openshift_metrics_storage_volume_name: openshift_hosted_metrics_storage_volume_name
+      openshift_metrics_storage_volume_size: openshift_hosted_metrics_storage_volume_size
+      openshift_metrics_storage_labels: openshift_hosted_metrics_storage_labels
+      openshift_metrics_image_prefix: openshift_hosted_metrics_deployer_prefix
+      openshift_metrics_image_version: openshift_hosted_metrics_deployer_version
+      openshift_metrics_install_metrics: openshift_hosted_metrics_deploy
+      openshift_metrics_storage_kind: openshift_hosted_metrics_storage_kind
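The set_fact block above derives PVC-related facts from the deprecated storage kind. A standalone sketch of that derivation (the helper name and returned keys are illustrative shorthand, not variables from this patch):

def derive_pvc_facts(storage_kind, volume_size, prefix='logging-es'):
    # Mirrors the conditionals above: dynamic provisioning only for 'dynamic',
    # a PVC size for 'dynamic' or 'nfs', and a prefix only for 'dynamic'.
    return {
        'pvc_dynamic': 'true' if storage_kind == 'dynamic' else '',
        'pvc_size': volume_size if storage_kind in ('dynamic', 'nfs') else '',
        'pvc_prefix': prefix if storage_kind == 'dynamic' else '',
    }

print(derive_pvc_facts('dynamic', '10Gi'))  # {'pvc_dynamic': 'true', 'pvc_size': '10Gi', 'pvc_prefix': 'logging-es'}
print(derive_pvc_facts('nfs', '10Gi'))      # {'pvc_dynamic': '', 'pvc_size': '10Gi', 'pvc_prefix': ''}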
diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
new file mode 100644
index 000000000..94d3acffc
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
@@ -0,0 +1,21 @@
+---
+
+- name: Check for usage of deprecated variables
+  set_fact:
+    __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will no longer be used in the next minor release. Please update your inventory accordingly.']"
+  when:
+  - hostvars[inventory_hostname][item] is defined
+  with_items: "{{ __warn_deprecated_vars }}"
+
+- block:
+  - debug: msg="{{__deprecation_message}}"
+  - pause:
+      seconds: "{{ 10 }}"
+  when:
+  - __deprecation_message | default ('') | length > 0
+
+# for with_fileglob Ansible resolves the path relative to the roles/<rolename>/files directory
+- name: Assign deprecated variables to correct counterparts
+  include: "{{ item }}"
+  with_fileglob:
+  - "../tasks/__deprecations_*.yml"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 47d7be05a..ca69c5b84 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,4 +1,8 @@
 ---
+# We should print out deprecations prior to any failures so that if a play does fail for other reasons
+# the user would also be aware of any deprecated variables they should adjust
+- include: deprecations.yml
+
 - name: Abort when conflicting deployment type variables are set
   when:
     - deployment_type is defined
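The first task in deprecations.yml above accumulates one warning per deprecated variable that is actually present in hostvars. A standalone sketch of that pass (the sample inventory content is illustrative):

DEPRECATION_HEADER = "[DEPRECATION WARNING]:"
deprecated_vars = ['openshift_hosted_logging_deploy', 'openshift_hosted_metrics_deploy']
hostvars = {'openshift_hosted_metrics_deploy': True}  # illustrative inventory content

messages = [
    "{0} {1} is a deprecated variable and will no longer be used in the next "
    "minor release. Please update your inventory accordingly.".format(DEPRECATION_HEADER, name)
    for name in deprecated_vars
    if name in hostvars  # only warn about variables the user actually set
]
print(messages)  # one message, for openshift_hosted_metrics_deploy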
diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml
index 37e88758d..0fc2372d2 100644
--- a/roles/openshift_sanitize_inventory/vars/main.yml
+++ b/roles/openshift_sanitize_inventory/vars/main.yml
@@ -2,3 +2,77 @@
 # origin uses community packages named 'origin'
 # openshift-enterprise uses Red Hat packages named 'atomic-openshift'
 known_openshift_deployment_types: ['origin', 'openshift-enterprise']
+
+__deprecation_header: "[DEPRECATION WARNING]:"
+
+# this is a list of variables that we will be deprecating within the next minor release; expect this list to change from release to release
+__warn_deprecated_vars:
+  # logging
+  - 'openshift_hosted_logging_deploy'
+  - 'openshift_hosted_logging_hostname'
+  - 'openshift_hosted_logging_ops_hostname'
+  - 'openshift_hosted_logging_master_public_url'
+  - 'openshift_hosted_logging_elasticsearch_cluster_size'
+  - 'openshift_hosted_logging_elasticsearch_ops_cluster_size'
+  - 'openshift_hosted_logging_image_pull_secret'
+  - 'openshift_hosted_logging_enable_ops_cluster'
+  - 'openshift_hosted_logging_curator_nodeselector'
+  - 'openshift_hosted_logging_curator_ops_nodeselector'
+  - 'openshift_hosted_logging_kibana_nodeselector'
+  - 'openshift_hosted_logging_kibana_ops_nodeselector'
+  - 'openshift_hosted_logging_fluentd_nodeselector_label'
+  - 'openshift_hosted_logging_journal_source'
+  - 'openshift_hosted_logging_journal_read_from_head'
+  - 'openshift_hosted_logging_elasticsearch_instance_ram'
+  - 'openshift_hosted_logging_storage_labels'
+  - 'openshift_hosted_logging_elasticsearch_pvc_dynamic'
+  - 'openshift_hosted_logging_elasticsearch_pvc_size'
+  - 'openshift_hosted_logging_elasticsearch_pvc_prefix'
+  - 'openshift_hosted_logging_elasticsearch_storage_group'
+  - 'openshift_hosted_logging_elasticsearch_nodeselector'
+  - 'openshift_hosted_logging_elasticsearch_ops_instance_ram'
+  - 'openshift_hosted_loggingops_storage_labels'
+  - 'openshift_hosted_logging_elasticsearch_ops_pvc_dynamic'
+  - 'openshift_hosted_logging_elasticsearch_ops_pvc_size'
+  - 'openshift_hosted_logging_elasticsearch_ops_pvc_prefix'
+  - 'openshift_hosted_logging_elasticsearch_storage_group'
+  - 'openshift_hosted_logging_elasticsearch_ops_nodeselector'
+  - 'openshift_hosted_logging_storage_access_modes'
+  - 'openshift_hosted_logging_storage_kind'
+  - 'openshift_hosted_loggingops_storage_kind'
+  - 'openshift_hosted_logging_storage_host'
+  - 'openshift_hosted_loggingops_storage_host'
+  - 'openshift_hosted_logging_storage_nfs_directory'
+  - 'openshift_hosted_loggingops_storage_nfs_directory'
+  - 'openshift_hosted_logging_storage_volume_name'
+  - 'openshift_hosted_loggingops_storage_volume_name'
+  - 'openshift_hosted_logging_storage_volume_size'
+  - 'openshift_hosted_loggingops_storage_volume_size'
+  - 'openshift_hosted_logging_enable_ops_cluster'
+  - 'openshift_hosted_logging_image_pull_secret'
+  - 'openshift_hosted_logging_curator_nodeselector'
+  - 'openshift_hosted_logging_curator_ops_nodeselector'
+  - 'openshift_hosted_logging_kibana_nodeselector'
+  - 'openshift_hosted_logging_kibana_ops_nodeselector'
+  - 'openshift_hosted_logging_ops_hostname'
+  - 'openshift_hosted_logging_fluentd_nodeselector_label'
+  - 'openshift_hosted_logging_journal_source'
+  - 'openshift_hosted_logging_journal_read_from_head'
+  - 'openshift_hosted_logging_elasticsearch_instance_ram'
+  - 'openshift_hosted_logging_elasticsearch_nodeselector'
+  - 'openshift_hosted_logging_elasticsearch_ops_instance_ram'
+  - 'openshift_hosted_logging_elasticsearch_ops_nodeselector'
+  - 'openshift_hosted_logging_storage_access_modes'
+  - 'openshift_hosted_logging_deployer_prefix'
+  - 'openshift_hosted_logging_deployer_version'
+  # metrics
+  - 'openshift_hosted_metrics_deploy'
+  - 'openshift_hosted_metrics_storage_kind'
+  - 'openshift_hosted_metrics_storage_access_modes'
+  - 'openshift_hosted_metrics_storage_host'
+  - 'openshift_hosted_metrics_storage_nfs_directory'
+  - 'openshift_hosted_metrics_storage_volume_name'
+  - 'openshift_hosted_metrics_storage_volume_size'
+  - 'openshift_hosted_metrics_storage_labels'
+  - 'openshift_hosted_metrics_deployer_prefix'
+  - 'openshift_hosted_metrics_deployer_version'
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 51f8f4e0e..3047fbaf9 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -31,9 +31,9 @@
     group: nfsnobody
   with_items:
     - "{{ openshift.hosted.registry }}"
-    - "{{ openshift.hosted.metrics }}"
-    - "{{ openshift.hosted.logging }}"
-    - "{{ openshift.hosted.loggingops }}"
+    - "{{ openshift.metrics }}"
+    - "{{ openshift.logging }}"
+    - "{{ openshift.loggingops }}"
     - "{{ openshift.hosted.etcd }}"
 
 - name: Configure exports
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 7e8f70b23..0141e0d25 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,5 +1,5 @@
 {{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }}
-{{ openshift.hosted.logging.storage.nfs.directory }}/{{ openshift.hosted.logging.storage.volume.name }} {{ openshift.hosted.logging.storage.nfs.options }}
-{{ openshift.hosted.loggingops.storage.nfs.directory }}/{{ openshift.hosted.loggingops.storage.volume.name }} {{ openshift.hosted.loggingops.storage.nfs.options }}
+{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
+{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
+{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
 {{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
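A sketch of how one line of the reworked exports.j2 renders, using plain Jinja2; the directory, volume name, and export options below are illustrative values, not taken from this patch:

from jinja2 import Template

line = Template(
    "{{ openshift.metrics.storage.nfs.directory }}/"
    "{{ openshift.metrics.storage.volume.name }} "
    "{{ openshift.metrics.storage.nfs.options }}"
)
openshift = {
    "metrics": {
        "storage": {
            "nfs": {"directory": "/exports", "options": "*(rw,root_squash)"},
            "volume": {"name": "metrics"},
        }
    }
}
print(line.render(openshift=openshift))  # -> /exports/metrics *(rw,root_squash)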
