177 files changed, 3054 insertions, 1454 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 7a8a6c820..4e4490141 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.7.0-0.181.0 ./ +3.7.0-0.197.0 ./ diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 2fbd23450..f9564499d 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -1125,6 +1125,73 @@ of items as ['region=infra', 'zone=primary'] return selectors +def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'): + """Parse the Service Account Secrets list, `sa_secrets` (as from +oc_serviceaccount_secret:state=list), and return the name of the secret +containing the `secret_hint` string. For example, by default this will +return the name of the secret holding the SA bearer token. + +Only provide the 'results' object to this filter. This filter expects +to receive a list like this: + +    [ +        { +            "name": "management-admin-dockercfg-p31s2" +        }, +        { +            "name": "management-admin-token-bnqsh" +        } +    ] + + +Returns: + +* `secret_name` [string] - The name of the secret matching the +  `secret_hint` parameter. By default this is the secret holding the +  SA's bearer token. + +Example playbook usage: + +Register the return value from oc_serviceaccount_secret and pass +that result to this filter plugin. + +    - name: Get all SA Secrets +      oc_serviceaccount_secret: +        state: list +        service_account: management-admin +        namespace: management-infra +      register: sa + +    - name: Save the SA bearer token secret name +      set_fact: +        management_token: "{{ sa.results | oo_filter_sa_secrets }}" + +    - name: Get the SA bearer token value +      oc_secret: +        state: list +        name: "{{ management_token }}" +        namespace: management-infra +        decode: true +      register: sa_secret + +    - name: Print the bearer token value +      debug: +        var: sa_secret.results.decoded.token + +    """ +    secret_name = None + +    for secret in sa_secrets: +        # each secret is a hash +        if secret['name'].find(secret_hint) == -1: +            continue +        else: +            secret_name = secret['name'] +            break + +    return secret_name + + class FilterModule(object): """ Custom ansible filter mapping """ @@ -1167,5 +1234,6 @@ class FilterModule(object): "to_padded_yaml": to_padded_yaml, "oo_random_word": oo_random_word, "oo_contains_rule": oo_contains_rule, - "oo_selector_to_string_list": oo_selector_to_string_list + "oo_selector_to_string_list": oo_selector_to_string_list, + "oo_filter_sa_secrets": oo_filter_sa_secrets, } diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example index 5a284ce97..acf68266e 100644 --- a/inventory/byo/hosts.byo.glusterfs.external.example +++ b/inventory/byo/hosts.byo.glusterfs.external.example @@ -19,6 +19,7 @@ [OSEv3:children] masters nodes +etcd # Specify there will be GlusterFS nodes glusterfs @@ -39,6 +40,9 @@ node0 openshift_schedulable=True node1 openshift_schedulable=True node2 openshift_schedulable=True +[etcd] +master + # Specify the glusterfs group, which contains the nodes of the external # GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" # and "glusterfs_devices" variables defined.
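As a quick orientation to the new filter added in filter_plugins/oo_filters.py above, here is a minimal standalone sketch (plain Python, run outside of Ansible) of its matching logic; the secret names are the illustrative examples from the docstring, not values from a real cluster:

# Standalone sketch of the oo_filter_sa_secrets matching logic shown in the diff above.
# The sample secret names mirror the docstring's example and are illustrative only.
def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
    """Return the name of the first secret whose name contains secret_hint, or None."""
    secret_name = None
    for secret in sa_secrets:
        # each secret is a hash with a 'name' key
        if secret['name'].find(secret_hint) == -1:
            continue
        secret_name = secret['name']
        break
    return secret_name

sa_results = [
    {"name": "management-admin-dockercfg-p31s2"},
    {"name": "management-admin-token-bnqsh"},
]

print(oo_filter_sa_secrets(sa_results))                 # management-admin-token-bnqsh
print(oo_filter_sa_secrets(sa_results, '-dockercfg-'))  # management-admin-dockercfg-p31s2

With the default hint the filter returns the SA bearer-token secret; passing a different hint, as in the second call, selects another secret, and None is returned when nothing matches.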
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example index d16df6470..a559dc377 100644 --- a/inventory/byo/hosts.byo.glusterfs.mixed.example +++ b/inventory/byo/hosts.byo.glusterfs.mixed.example @@ -19,6 +19,7 @@ [OSEv3:children] masters nodes +etcd # Specify there will be GlusterFS nodes glusterfs @@ -42,6 +43,9 @@ node0 openshift_schedulable=True node1 openshift_schedulable=True node2 openshift_schedulable=True +[etcd] +master + # Specify the glusterfs group, which contains the nodes of the external # GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" # and "glusterfs_devices" variables defined. diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example index c1a1f6f84..ca4765c53 100644 --- a/inventory/byo/hosts.byo.glusterfs.native.example +++ b/inventory/byo/hosts.byo.glusterfs.native.example @@ -16,6 +16,7 @@ [OSEv3:children] masters nodes +etcd # Specify there will be GlusterFS nodes glusterfs @@ -34,6 +35,9 @@ node0 openshift_schedulable=True node1 openshift_schedulable=True node2 openshift_schedulable=True +[etcd] +master + # Specify the glusterfs group, which contains the nodes that will host # GlusterFS storage pods. At a minimum, each node must have a # "glusterfs_devices" variable defined. This variable is a list of block diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example index 31a85ee42..32040f593 100644 --- a/inventory/byo/hosts.byo.glusterfs.registry-only.example +++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example @@ -20,6 +20,7 @@ [OSEv3:children] masters nodes +etcd # Specify there will be GlusterFS nodes glusterfs_registry @@ -40,6 +41,9 @@ node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +[etcd] +master + # Specify the glusterfs group, which contains the nodes that will host # GlusterFS storage pods. At a minimum, each node must have a # "glusterfs_devices" variable defined. This variable is a list of block diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example index 54bd89ddc..9bd37cbf6 100644 --- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example +++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example @@ -20,6 +20,7 @@ [OSEv3:children] masters nodes +etcd # Specify there will be GlusterFS nodes glusterfs glusterfs_registry @@ -46,6 +47,9 @@ node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +[etcd] +master + # Specify the glusterfs group, which contains the nodes that will host # GlusterFS storage pods. At a minimum, each node must have a # "glusterfs_devices" variable defined. This variable is a list of block diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 7c4a7885d..5de43270e 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -123,6 +123,15 @@ openshift_release=v3.7 # use this option if you are sure you know what you are doing! 
#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" #openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest" +# NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used +# unless you know what you are doing!! +# The following two variables are used when openshift_use_crio is True +# to clean up after builds that pass through docker. +# Enable docker garbage collection when using cri-o +#openshift_crio_enable_docker_gc=false +# Node Selectors to run the garbage collection +#openshift_crio_docker_gc_node_selector: {'runtime': 'cri-o'} + # Items added, as is, to end of /etc/sysconfig/docker OPTIONS # Default value: "--log-driver=journald" #openshift_docker_options="-l warn --ipv6=false" @@ -310,9 +319,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_master_cluster_hostname=openshift-ansible.test.example.com #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com -# Override the default controller lease ttl -#osm_controller_lease_ttl=30 - # Configure controller arguments #osm_controller_args={'resource-quota-sync-period': ['10s']} @@ -974,25 +980,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # openshift_upgrade_post_storage_migration_enabled=true # openshift_upgrade_post_storage_migration_fatal=false -# host group for masters -[masters] -ose3-master[1:3]-ansible.test.example.com - -[etcd] -ose3-etcd[1:3]-ansible.test.example.com - -# NOTE: Containerized load balancer hosts are not yet supported, if using a global -# containerized=true host variable we must set to false. -[lb] -ose3-lb-ansible.test.example.com containerized=false - -# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes -# However, in order to ensure that your masters are not burdened with running pods you should -# make them unschedulable by adding openshift_schedulable=False any node that's also a master. -[nodes] -ose3-master[1:3]-ansible.test.example.com -ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" - +###################################################################### # CloudForms/ManageIQ (CFME/MIQ) Configuration # See the readme for full descriptions and getting started @@ -1042,6 +1030,17 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima # setting this variable. Useful for testing specific task files. #openshift_management_storage_nfs_local_hostname: false +# These are the default values for the username and password of the +# management app. Changing these values in your inventory will not +# change your username or password. You should only need to change +# these values in your inventory if you already changed the actual +# name and password AND are trying to use integration scripts. +# +# For example, adding this cluster as a container provider, +# playbooks/byo/openshift-management/add_container_provider.yml +#openshift_management_username: admin +#openshift_management_password: smartvm + # A hash of parameters you want to override or set in the # miq-template.yaml or miq-template-ext-db.yaml templates. Set this in # your inventory file as a simple hash.
Acceptable values are defined @@ -1050,3 +1049,28 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima # # openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'} #openshift_management_template_parameters: {} + +# Firewall configuration +# You can open additional firewall ports by defining them as a list of service +# names and ports/port ranges for either masters or nodes. +#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}] +#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}] + +# host group for masters +[masters] +ose3-master[1:3]-ansible.test.example.com + +[etcd] +ose3-etcd[1:3]-ansible.test.example.com + +# NOTE: Containerized load balancer hosts are not yet supported, if using a global +# containerized=true host variable we must set to false. +[lb] +ose3-lb-ansible.test.example.com containerized=false + +# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes +# However, in order to ensure that your masters are not burdened with running pods you should +# make them unschedulable by adding openshift_schedulable=False to any node that's also a master. +[nodes] +ose3-master[1:3]-ansible.test.example.com +ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example deleted file mode 100644 index 9d811fcab..000000000 --- a/inventory/byo/hosts.origin.example +++ /dev/null @@ -1,900 +0,0 @@ -# This is an example of a bring your own (byo) host inventory - -# Create an OSEv3 group that contains the masters and nodes groups -[OSEv3:children] -masters -nodes -etcd -lb -nfs - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] -# Enable unsupported configurations, things that will yield a partially -# functioning cluster but would not be supported for production use -#openshift_enable_unsupported_configurations=false - -# SSH user, this user should allow ssh based auth without requiring a -# password. If using ssh key based auth, then the key should be managed by an -# ssh agent. -ansible_ssh_user=root - -# If ansible_ssh_user is not root, ansible_become must be set to true and the -# user must be configured for passwordless sudo -#ansible_become=yes - -# Debug level for all OpenShift components (Defaults to 2) -debug_level=2 - -# Specify the deployment type. Valid values are origin and openshift-enterprise. -openshift_deployment_type=origin - -# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we -# rely on the version running on the first master. Works best for containerized installs where we can usually -# use this to lookup the latest exact version of the container images, which is the tag actually used to configure -# the cluster. For RPM installations we just verify the version detected in your configured repos matches this -# release. -openshift_release=v3.7 - -# Specify an exact container image tag to install or configure. -# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_image_tag=v3.7.0 - -# Specify an exact rpm version to install or configure.
-# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_pkg_version=-3.7.0 - -# This enables all the system containers except for docker: -#openshift_use_system_containers=False -# -# But you can choose separately each component that must be a -# system container: -# -#openshift_use_openvswitch_system_container=False -#openshift_use_node_system_container=False -#openshift_use_master_system_container=False -#openshift_use_etcd_system_container=False -# -# In either case, system_images_registry must be specified to be able to find the system images -#system_images_registry="docker.io" - -# Install the openshift examples -#openshift_install_examples=true - -# Configure logoutURL in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url -#openshift_master_logout_url=http://example.com - -# Configure extensionScripts in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets -#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] - -# Configure extensionStylesheets in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets -#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] - -# Configure extensions in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files -#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] - -# Configure extensions in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files -#openshift_master_oauth_template=/path/to/login-template.html - -# Configure imagePolicyConfig in the master config -# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig -#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} - -# Configure master API rate limits for external clients -#openshift_master_external_ratelimit_qps=200 -#openshift_master_external_ratelimit_burst=400 -# Configure master API rate limits for loopback clients -#openshift_master_loopback_ratelimit_qps=300 -#openshift_master_loopback_ratelimit_burst=600 - -# Docker Configuration -# Add additional, insecure, and blocked registries to global docker configuration -# For enterprise deployment types we ensure that registry.access.redhat.com is -# included if you do not include it -#openshift_docker_additional_registries=registry.example.com -#openshift_docker_insecure_registries=registry.example.com -#openshift_docker_blocked_registries=registry.hacker.com -# Disable pushing to dockerhub -#openshift_docker_disable_push_dockerhub=True -# Use Docker inside a System Container. Note that this is a tech preview and should -# not be used to upgrade! 
-# The following options for docker are ignored: -# - docker_version -# - docker_upgrade -# The following options must not be used -# - openshift_docker_options -#openshift_docker_use_system_container=False -# Instead of using docker, replacec it with cri-o -# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as it's override -# just as container-engine does. -#openshift_use_crio=False -# Force the registry to use for the docker/crio system container. By default the registry -# will be built off of the deployment type and ansible_distribution. Only -# use this option if you are sure you know what you are doing! -#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest" -#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest" -# Items added, as is, to end of /etc/sysconfig/docker OPTIONS -# Default value: "--log-driver=journald" -#openshift_docker_options="-l warn --ipv6=false" - -# Specify exact version of Docker to configure or upgrade to. -# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10. -# docker_version="1.12.1" - -# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True. -# Uncomment below to disable; for example if your kernel does not support the -# Docker overlay/overlay2 storage drivers with SELinux enabled. -#openshift_docker_selinux_enabled=False - -# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. -# docker_upgrade=False - -# Specify exact version of etcd to configure or upgrade to. -# etcd_version="3.1.0" -# Enable etcd debug logging, defaults to false -# etcd_debug=true -# Set etcd log levels by package -# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG" - -# Upgrade Hooks -# -# Hooks are available to run custom tasks at various points during a cluster -# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using -# absolute paths, if not the path will be treated as relative to the file where the -# hook is actually used. -# -# Tasks to run before each master is upgraded. -# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml -# -# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible -# upgrade steps, but before we restart system/services. -# openshift_master_upgrade_hook=/usr/share/custom/master.yml -# -# Tasks to run after each master is upgraded and system/services have been restarted. 
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml - - -# Alternate image format string, useful if you've got your own registry mirror -# Configure this setting just on node or master -#oreg_url_master=example.com/openshift3/ose-${component}:${version} -#oreg_url_node=example.com/openshift3/ose-${component}:${version} -# For setting the configuration globally -#oreg_url=example.com/openshift3/ose-${component}:${version} -# If oreg_url points to a registry other than registry.access.redhat.com we can -# modify image streams to point at that registry by setting the following to true -#openshift_examples_modify_imagestreams=true - -# OpenShift repository configuration -#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] -#openshift_repos_enable_testing=false - -# htpasswd auth -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] -# Defining htpasswd users -#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'} -# or -#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file> - -# Allow all auth -#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] - -# LDAP auth -#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] -# -# Configure LDAP CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the LDAPPasswordIdentityProvider. -# -#openshift_master_ldap_ca=<ca text> -# or -#openshift_master_ldap_ca_file=<path to local ca file to use> - -# OpenID auth -#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}] -# -# Configure OpenID CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the OpenIDIdentityProvider. 
-# -#openshift_master_openid_ca=<ca text> -# or -#openshift_master_openid_ca_file=<path to local ca file to use> - -# Request header auth -#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] -# -# Configure request header CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "clientCA" -# key set within the RequestHeaderIdentityProvider. -# -#openshift_master_request_header_ca=<ca text> -# or -#openshift_master_request_header_ca_file=<path to local ca file to use> - -# CloudForms Management Engine (ManageIQ) App Install -# -# Enables installation of MIQ server. Recommended for dedicated -# clusters only. See roles/openshift_cfme/README.md for instructions -# and requirements. -#openshift_cfme_install_app=False - -# Cloud Provider Configuration -# -# Note: You may make use of environment variables rather than store -# sensitive configuration within the ansible inventory. -# For example: -#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}" -#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}" -# -# AWS -#openshift_cloudprovider_kind=aws -# Note: IAM profiles may be used instead of storing API credentials on disk. -#openshift_cloudprovider_aws_access_key=aws_access_key_id -#openshift_cloudprovider_aws_secret_key=aws_secret_access_key -# -# Openstack -#openshift_cloudprovider_kind=openstack -#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ -#openshift_cloudprovider_openstack_username=username -#openshift_cloudprovider_openstack_password=password -#openshift_cloudprovider_openstack_domain_id=domain_id -#openshift_cloudprovider_openstack_domain_name=domain_name -#openshift_cloudprovider_openstack_tenant_id=tenant_id -#openshift_cloudprovider_openstack_tenant_name=tenant_name -#openshift_cloudprovider_openstack_region=region -#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id -# -# GCE -#openshift_cloudprovider_kind=gce - -# Project Configuration -#osm_project_request_message='' -#osm_project_request_template='' -#osm_mcs_allocator_range='s0:/2' -#osm_mcs_labels_per_project=5 -#osm_uid_allocator_range='1000000000-1999999999/10000' - -# Configure additional projects -#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} - -# Enable cockpit -#osm_use_cockpit=true -# -# Set cockpit plugins -#osm_cockpit_plugins=['cockpit-kubernetes'] - -# Native high availability cluster method with optional load balancer. -# If no lb group is defined, the installer assumes that a load balancer has -# been preconfigured. For installation the value of -# openshift_master_cluster_hostname must resolve to the load balancer -# or to one or all of the masters defined in the inventory if no load -# balancer is present. 
-#openshift_master_cluster_method=native -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com - -# Pacemaker high availability cluster method. -# Pacemaker HA environment must be able to self provision the -# configured VIP. For installation openshift_master_cluster_hostname -# must resolve to the configured VIP. -#openshift_master_cluster_method=pacemaker -#openshift_master_cluster_password=openshift_cluster -#openshift_master_cluster_vip=192.168.133.25 -#openshift_master_cluster_public_vip=192.168.133.25 -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com - -# Override the default controller lease ttl -#osm_controller_lease_ttl=30 - -# Configure controller arguments -#osm_controller_args={'resource-quota-sync-period': ['10s']} - -# Configure api server arguments -#osm_api_server_args={'max-requests-inflight': ['400']} - -# default subdomain to use for exposed routes -#openshift_master_default_subdomain=apps.test.example.com - -# additional cors origins -#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] - -# default project node selector -#osm_default_node_selector='region=primary' - -# Override the default pod eviction timeout -#openshift_master_pod_eviction_timeout=5m - -# Override the default oauth tokenConfig settings: -# openshift_master_access_token_max_seconds=86400 -# openshift_master_auth_token_max_seconds=500 - -# Override master servingInfo.maxRequestsInFlight -#openshift_master_max_requests_inflight=500 - -# Override master and node servingInfo.minTLSVersion and .cipherSuites -# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 -# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants -#openshift_master_min_tls_version=VersionTLS12 -#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] -# -#openshift_node_min_tls_version=VersionTLS12 -#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] - -# default storage plugin dependencies to install, by default the ceph and -# glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] - -# OpenShift Router Options -# -# An OpenShift router will be created during install if there are -# nodes present with labels matching the default router selector, -# "region=infra". Set openshift_node_labels per node as needed in -# order to label nodes. -# -# Example: -# [nodes] -# node.example.com openshift_node_labels="{'region': 'infra'}" -# -# Router selector (optional) -# Router will only be created if nodes matching this label are present. -# Default value: 'region=infra' -#openshift_hosted_router_selector='region=infra' -# -# Router replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift router selector. -#openshift_hosted_router_replicas=2 -# -# Router force subdomain (optional) -# A router path format to force on all routes used by this router -# (will ignore the route host value) -#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' -# -# Router certificate (optional) -# Provide local certificate paths which will be configured as the -# router's default certificate. 
-#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} -# -# Manage the OpenShift Router -#openshift_hosted_manage_router=true -# -# Router sharding support has been added and can be achieved by supplying the correct -# data to the inventory. The variable to house the data is openshift_hosted_routers -# and is in the form of a list. If no data is passed then a default router will be -# created. There are multiple combinations of router sharding. The one described -# below supports routers on separate nodes. -# -#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}] - -# OpenShift Registry Console Options -# Override the console image prefix for enterprise deployments, not used in origin -# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console" -#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/ -# Override image version, defaults to latest for origin, matches the product version for enterprise -#openshift_cockpit_deployer_version=1.4.1 - -# Openshift Registry Options -# -# An OpenShift registry will be created during install if there are -# nodes present with labels matching the default registry selector, -# "region=infra". Set openshift_node_labels per node as needed in -# order to label nodes. -# -# Example: -# [nodes] -# node.example.com openshift_node_labels="{'region': 'infra'}" -# -# Registry selector (optional) -# Registry will only be created if nodes matching this label are present. -# Default value: 'region=infra' -#openshift_hosted_registry_selector='region=infra' -# -# Registry replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift registry selector. -#openshift_hosted_registry_replicas=2 -# -# Validity of the auto-generated certificate in days (optional) -#openshift_hosted_registry_cert_expire_days=730 -# -# Manage the OpenShift Registry -#openshift_hosted_manage_registry=true - -# Registry Storage Options -# -# NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. 
For example, the volume -# path using these options would be "/exports/registry" -#openshift_hosted_registry_storage_kind=nfs -#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] -#openshift_hosted_registry_storage_nfs_directory=/exports -#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_registry_storage_volume_name=registry -#openshift_hosted_registry_storage_volume_size=10Gi -# -# External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/registry" -#openshift_hosted_registry_storage_kind=nfs -#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] -#openshift_hosted_registry_storage_host=nfs.example.com -#openshift_hosted_registry_storage_nfs_directory=/exports -#openshift_hosted_registry_storage_volume_name=registry -#openshift_hosted_registry_storage_volume_size=10Gi -# -# Openstack -# Volume must already exist. -#openshift_hosted_registry_storage_kind=openstack -#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_registry_storage_openstack_filesystem=ext4 -#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 -#openshift_hosted_registry_storage_volume_size=10Gi -# -# AWS S3 -# S3 bucket must already exist. -#openshift_hosted_registry_storage_kind=object -#openshift_hosted_registry_storage_provider=s3 -#openshift_hosted_registry_storage_s3_encrypt=false -#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id -#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id -#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key -#openshift_hosted_registry_storage_s3_bucket=bucket_name -#openshift_hosted_registry_storage_s3_region=bucket_region -#openshift_hosted_registry_storage_s3_chunksize=26214400 -#openshift_hosted_registry_storage_s3_rootdirectory=/registry -#openshift_hosted_registry_pullthrough=true -#openshift_hosted_registry_acceptschema2=true -#openshift_hosted_registry_enforcequota=true -# -# Any S3 service (Minio, ExoScale, ...): Basically the same as above -# but with regionendpoint configured -# S3 bucket must already exist. -#openshift_hosted_registry_storage_kind=object -#openshift_hosted_registry_storage_provider=s3 -#openshift_hosted_registry_storage_s3_accesskey=access_key_id -#openshift_hosted_registry_storage_s3_secretkey=secret_access_key -#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/ -#openshift_hosted_registry_storage_s3_bucket=bucket_name -#openshift_hosted_registry_storage_s3_region=bucket_region -#openshift_hosted_registry_storage_s3_chunksize=26214400 -#openshift_hosted_registry_storage_s3_rootdirectory=/registry -#openshift_hosted_registry_pullthrough=true -#openshift_hosted_registry_acceptschema2=true -#openshift_hosted_registry_enforcequota=true -# -# Additional CloudFront Options. When using CloudFront all three -# of the followingg variables must be defined. 
-#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/ -#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem -#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid - -# Metrics deployment -# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html -# -# By default metrics are not automatically deployed, set this to enable them -#openshift_metrics_install_metrics=true -# -# Storage Options -# If openshift_metrics_storage_kind is unset then metrics will be stored -# in an EmptyDir volume and will be deleted when the cassandra pod terminates. -# Storage options A & B currently support only one cassandra pod which is -# generally enough for up to 1000 pods. Additional volumes can be created -# manually after the fact and metrics scaled per the docs. -# -# Option A - NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. For example, the volume -# path using these options would be "/exports/metrics" -#openshift_metrics_storage_kind=nfs -#openshift_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_metrics_storage_nfs_directory=/exports -#openshift_metrics_storage_nfs_options='*(rw,root_squash)' -#openshift_metrics_storage_volume_name=metrics -#openshift_metrics_storage_volume_size=10Gi -#openshift_metrics_storage_labels={'storage': 'metrics'} -# -# Option B - External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/metrics" -#openshift_metrics_storage_kind=nfs -#openshift_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_metrics_storage_host=nfs.example.com -#openshift_metrics_storage_nfs_directory=/exports -#openshift_metrics_storage_volume_name=metrics -#openshift_metrics_storage_volume_size=10Gi -#openshift_metrics_storage_labels={'storage': 'metrics'} -# -# Option C - Dynamic -- If openshift supports dynamic volume provisioning for -# your cloud platform use this. -#openshift_metrics_storage_kind=dynamic -# -# Other Metrics Options -- Common items you may wish to reconfigure, for the complete -# list of options please see roles/openshift_metrics/README.md -# -# Override metricsPublicURL in the master config for cluster metrics -# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics -# Currently, you may only alter the hostname portion of the url, alterting the -# `/hawkular/metrics` path will break installation of metrics. -#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics -# Configure the prefix and version for the component images -#openshift_metrics_image_prefix=docker.io/openshift/origin- -#openshift_metrics_image_version=v3.7.0 -# -# StorageClass -# openshift_storageclass_name=gp2 -# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'} -# - -# Logging deployment -# -# Currently logging deployment is disabled by default, enable it by setting this -#openshift_logging_install_logging=true -# -# Logging storage config -# Option A - NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. 
For example, the volume -# path using these options would be "/exports/logging" -#openshift_logging_storage_kind=nfs -#openshift_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_logging_storage_nfs_directory=/exports -#openshift_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_logging_storage_volume_name=logging -#openshift_logging_storage_volume_size=10Gi -#openshift_logging_storage_labels={'storage': 'logging'} -# -# Option B - External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/logging" -#openshift_logging_storage_kind=nfs -#openshift_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_logging_storage_host=nfs.example.com -#openshift_logging_storage_nfs_directory=/exports -#openshift_logging_storage_volume_name=logging -#openshift_logging_storage_volume_size=10Gi -#openshift_logging_storage_labels={'storage': 'logging'} -# -# Option C - Dynamic -- If openshift supports dynamic volume provisioning for -# your cloud platform use this. -#openshift_logging_storage_kind=dynamic -# -# Option D - none -- Logging will use emptydir volumes which are destroyed when -# pods are deleted -# -# Other Logging Options -- Common items you may wish to reconfigure, for the complete -# list of options please see roles/openshift_logging/README.md -# -# Configure loggingPublicURL in the master config for aggregate logging, defaults -# to kibana.{{ openshift_master_default_subdomain }} -#openshift_logging_kibana_hostname=logging.apps.example.com -# Configure the number of elastic search nodes, unless you're using dynamic provisioning -# this value must be 1 -#openshift_logging_es_cluster_size=1 -# Configure the prefix and version for the component images -#openshift_logging_image_prefix=docker.io/openshift/origin- -#openshift_logging_image_version=v3.7.0 - -# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') -# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' - -# Disable the OpenShift SDN plugin -# openshift_use_openshift_sdn=False - -# Configure SDN cluster network and kubernetes service CIDR blocks. These -# network blocks should be private and should not conflict with network blocks -# in your infrastructure that pods may require access to. Can not be changed -# after deployment. -# -# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of -# 172.17.0.0/16. Your installation will fail and/or your configuration change will -# cause the Pod SDN or Cluster SDN to fail. -# -# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting -# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS -# environment variable located in /etc/sysconfig/docker-network. -# When upgrading or scaling up the following must match whats in your master config! -# Inventory: master yaml field -# osm_cluster_network_cidr: clusterNetworkCIDR -# openshift_portal_net: serviceNetworkCIDR -# When installing osm_cluster_network_cidr and openshift_portal_net must be set. -# Sane examples are provided below. -#osm_cluster_network_cidr=10.128.0.0/14 -#openshift_portal_net=172.30.0.0/16 - -# ExternalIPNetworkCIDRs controls what values are acceptable for the -# service external IP field. If empty, no externalIP may be set. It -# may contain a list of CIDRs which are checked for access. 
If a CIDR -# is prefixed with !, IPs in that CIDR will be rejected. Rejections -# will be applied first, then the IP checked against one of the -# allowed CIDRs. You should ensure this range does not overlap with -# your nodes, pods, or service CIDRs for security reasons. -#openshift_master_external_ip_network_cidrs=['0.0.0.0/0'] - -# IngressIPNetworkCIDR controls the range to assign ingress IPs from for -# services of type LoadBalancer on bare metal. If empty, ingress IPs will not -# be assigned. It may contain a single CIDR that will be allocated from. For -# security reasons, you should ensure that this range does not overlap with -# the CIDRs reserved for external IPs, nodes, pods, or services. -#openshift_master_ingress_ip_network_cidr=172.46.0.0/16 - -# Configure number of bits to allocate to each host’s subnet e.g. 9 -# would mean a /23 network on the host. -# When upgrading or scaling up the following must match whats in your master config! -# Inventory: master yaml field -# osm_host_subnet_length: hostSubnetLength -# When installing osm_host_subnet_length must be set. A sane example is provided below. -#osm_host_subnet_length=9 - -# Configure master API and console ports. -#openshift_master_api_port=8443 -#openshift_master_console_port=8443 - -# set RPM version for debugging purposes -#openshift_pkg_version=-1.1 - -# Configure custom ca certificate -#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'} -# -# NOTE: CA certificate will not be replaced with existing clusters. -# This option may only be specified when creating a new cluster or -# when redeploying cluster certificates with the redeploy-certificates -# playbook. - -# Configure custom named certificates (SNI certificates) -# -# https://docs.openshift.org/latest/install_config/certificate_customization.html -# -# NOTE: openshift_master_named_certificates is cached on masters and is an -# additive fact, meaning that each run with a different set of certificates -# will add the newly provided certificates to the cached set of certificates. -# -# An optional CA may be specified for each named certificate. CAs will -# be added to the OpenShift CA bundle which allows for the named -# certificate to be served for internal cluster communication. -# -# If you would like openshift_master_named_certificates to be overwritten with -# the provided value, specify openshift_master_overwrite_named_certificates. -#openshift_master_overwrite_named_certificates=true -# -# Provide local certificate paths which will be deployed to masters -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}] -# -# Detected names may be overridden by specifying the "names" key -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}] - -# Session options -#openshift_master_session_name=ssn -#openshift_master_session_max_seconds=3600 - -# An authentication and encryption secret will be generated if secrets -# are not provided. If provided, openshift_master_session_auth_secrets -# and openshift_master_encryption_secrets must be equal length. -# -# Signing secrets, used to authenticate sessions using -# HMAC. Recommended to use secrets with 32 or 64 bytes. -#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] -# -# Encrypting secrets, used to encrypt sessions. 
Must be 16, 24, or 32 -# characters long, to select AES-128, AES-192, or AES-256. -#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] - -# configure how often node iptables rules are refreshed -#openshift_node_iptables_sync_period=5s - -# Configure nodeIP in the node config -# This is needed in cases where node traffic is desired to go over an -# interface other than the default network interface. -#openshift_set_node_ip=True - -# Force setting of system hostname when configuring OpenShift -# This works around issues related to installations that do not have valid dns -# entries for the interfaces attached to the host. -#openshift_set_hostname=True - -# Configure dnsIP in the node config -#openshift_dns_ip=172.30.0.1 - -# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']} - -# Configure logrotate scripts -# See: https://github.com/nickhammond/ansible-logrotate -#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] - -# openshift-ansible will wait indefinitely for your input when it detects that the -# value of openshift_hostname resolves to an IP address not bound to any local -# interfaces. This mis-configuration is problematic for any pod leveraging host -# networking and liveness or readiness probes. -# Setting this variable to true will override that check. -#openshift_override_hostname_check=true - -# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail -# in versions >= 3.6 -#openshift_use_dnsmasq=False - -# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf -# This is useful for POC environments where DNS may not actually be available yet or to set -# options like 'strict-order' to alter dnsmasq configuration. -#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf - -# Global Proxy Configuration -# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment -# variables for docker and master services. -# -# Hosts in the openshift_no_proxy list will NOT use any globally -# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains -# (.example.com), and hosts (example.com), and IP addresses. -#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT -#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT -#openshift_no_proxy='.hosts.example.com,some-host.com' -# -# Most environments don't require a proxy between openshift masters, nodes, and -# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. -# If all of your hosts share a common domain you may wish to disable this and -# specify that domain above instead. -# -# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and -# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy -# variable (above) and set this value to False -#openshift_generate_no_proxy_hosts=True -# -# These options configure the BuildDefaults admission controller which injects -# configuration into Builds. Proxy related values will default to the global proxy -# config values. 
You only need to set these if they differ from the global proxy settings. -# See BuildDefaults documentation at -# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html -#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_no_proxy=mycorp.com -#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_no_proxy=mycorp.com -#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'} -#openshift_builddefaults_resources_requests_cpu=100m -#openshift_builddefaults_resources_requests_memory=256Mi -#openshift_builddefaults_resources_limits_cpu=1000m -#openshift_builddefaults_resources_limits_memory=512Mi - -# Or you may optionally define your own build defaults configuration serialized as json -#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}' - -# These options configure the BuildOverrides admission controller which injects -# configuration into Builds. -# See BuildOverrides documentation at -# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html -#openshift_buildoverrides_force_pull=true -#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'} - -# Or you may optionally define your own build overrides configuration serialized as json -#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}' - -# Enable template service broker by specifying one of more namespaces whose -# templates will be served by the broker -#openshift_template_service_broker_namespaces=['openshift'] - -# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default -#openshift_master_dynamic_provisioning_enabled=False - -# Admission plugin config -#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} - -# Configure usage of openshift_clock role. -#openshift_clock_enabled=true - -# OpenShift Per-Service Environment Variables -# Environment variables are added to /etc/sysconfig files for -# each OpenShift service: node, master (api and controllers). -# API and controllers environment variables are merged in single -# master environments. 
-#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"} -#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"} -#openshift_node_env_vars={"ENABLE_HTTP2": "true"} - -# Enable API service auditing, available as of 1.3 -#openshift_master_audit_config={"enabled": true} -# -# In case you want more advanced setup for the auditlog you can -# use this line. -# The directory in "auditFilePath" will be created if it's not -# exist -#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5} - -# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used -# by deployment_type=origin -#openshift_enable_origin_repo=false - -# Validity of the auto-generated OpenShift certificates in days. -# See also openshift_hosted_registry_cert_expire_days above. -# -#openshift_ca_cert_expire_days=1825 -#openshift_node_cert_expire_days=730 -#openshift_master_cert_expire_days=730 - -# Validity of the auto-generated external etcd certificates in days. -# Controls validity for etcd CA, peer, server and client certificates. -# -#etcd_ca_default_days=1825 -# -# ServiceAccountConfig:LimitSecretRefences rejects pods that reference secrets their service accounts do not reference -# openshift_master_saconfig_limitsecretreferences=false - -# Upgrade Control -# -# By default nodes are upgraded in a serial manner one at a time and all failures -# are fatal, one set of variables for normal nodes, one set of variables for -# nodes that are part of control plane as the number of hosts may be different -# in those two groups. -#openshift_upgrade_nodes_serial=1 -#openshift_upgrade_nodes_max_fail_percentage=0 -#openshift_upgrade_control_plane_nodes_serial=1 -#openshift_upgrade_control_plane_nodes_max_fail_percentage=0 -# -# You can specify the number of nodes to upgrade at once. We do not currently -# attempt to verify that you have capacity to drain this many nodes at once -# so please be careful when specifying these values. You should also verify that -# the expected number of nodes are all schedulable and ready before starting an -# upgrade. If it's not possible to drain the requested nodes the upgrade will -# stall indefinitely until the drain is successful. -# -# If you're upgrading more than one node at a time you can specify the maximum -# percentage of failure within the batch before the upgrade is aborted. Any -# nodes that do fail are ignored for the rest of the playbook run and you should -# take care to investigate the failure and return the node to service so that -# your cluster. 
-# -# The percentage must exceed the value, this would fail on two failures -# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49 -# where as this would not -# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50 -# -# Multiple data migrations take place and if they fail they will fail the upgrade -# You may wish to disable these or make them non fatal -# -# openshift_upgrade_pre_storage_migration_enabled=true -# openshift_upgrade_pre_storage_migration_fatal==true -# openshift_upgrade_post_storage_migration_enabled=true -# openshift_upgrade_post_storage_migration_fatal==false - -# host group for masters -[masters] -ose3-master[1:3]-ansible.test.example.com - -[etcd] -ose3-etcd[1:3]-ansible.test.example.com - -# NOTE: Containerized load balancer hosts are not yet supported, if using a global -# containerized=true host variable we must set to false. -[lb] -ose3-lb-ansible.test.example.com containerized=false - -# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes -# However, in order to ensure that your masters are not burdened with running pods you should -# make them unschedulable by adding openshift_schedulable=False any node that's also a master. -[nodes] -ose3-master[1:3]-ansible.test.example.com -ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/meta/main.yml b/meta/main.yml new file mode 100644 index 000000000..7f867d73b --- /dev/null +++ b/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: diff --git a/openshift-ansible.spec b/openshift-ansible.spec index c26fd44d6..b8944d0ae 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.7.0 -Release: 0.181.0%{?dist} +Release: 0.197.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -21,7 +21,12 @@ Requires: ansible >= 2.3 Requires: python2 Requires: python-six Requires: tar -Requires: openshift-ansible-docs = %{version} +Requires: %{name}-docs = %{version}-%{release} +Requires: %{name}-playbooks = %{version}-%{release} +Requires: %{name}-roles = %{version}-%{release} +Requires: %{name}-filter-plugins = %{version}-%{release} +Requires: %{name}-lookup-plugins = %{version}-%{release} +Requires: %{name}-callback-plugins = %{version}-%{release} Requires: java-1.8.0-openjdk-headless Requires: httpd-tools Requires: libselinux-python @@ -134,7 +139,7 @@ popd # ---------------------------------------------------------------------------------- %package docs Summary: Openshift and Atomic Enterprise Ansible documents -Requires: %{name} = %{version} +Requires: %{name} = %{version}-%{release} BuildArch: noarch %description docs @@ -148,11 +153,11 @@ BuildArch: noarch # ---------------------------------------------------------------------------------- %package playbooks Summary: Openshift and Atomic Enterprise Ansible Playbooks -Requires: %{name} = %{version} -Requires: %{name}-roles = %{version} -Requires: %{name}-lookup-plugins = %{version} -Requires: %{name}-filter-plugins = %{version} -Requires: %{name}-callback-plugins = %{version} +Requires: %{name} = %{version}-%{release} +Requires: %{name}-roles = %{version}-%{release} +Requires: %{name}-lookup-plugins = %{version}-%{release} +Requires: %{name}-filter-plugins = %{version}-%{release} +Requires: %{name}-callback-plugins = %{version}-%{release} BuildArch: noarch %description playbooks @@ 
-192,10 +197,10 @@ end # openshift-ansible-roles subpackage # ---------------------------------------------------------------------------------- Summary: Openshift and Atomic Enterprise Ansible roles -Requires: %{name} = %{version} -Requires: %{name}-lookup-plugins = %{version} -Requires: %{name}-filter-plugins = %{version} -Requires: %{name}-callback-plugins = %{version} +Requires: %{name} = %{version}-%{release} +Requires: %{name}-lookup-plugins = %{version}-%{release} +Requires: %{name}-filter-plugins = %{version}-%{release} +Requires: %{name}-callback-plugins = %{version}-%{release} BuildArch: noarch %description roles @@ -210,7 +215,7 @@ BuildArch: noarch # ---------------------------------------------------------------------------------- %package filter-plugins Summary: Openshift and Atomic Enterprise Ansible filter plugins -Requires: %{name} = %{version} +Requires: %{name} = %{version}-%{release} BuildArch: noarch Requires: pyOpenSSL @@ -227,7 +232,7 @@ Requires: pyOpenSSL # ---------------------------------------------------------------------------------- %package lookup-plugins Summary: Openshift and Atomic Enterprise Ansible lookup plugins -Requires: %{name} = %{version} +Requires: %{name} = %{version}-%{release} BuildArch: noarch %description lookup-plugins @@ -243,7 +248,7 @@ BuildArch: noarch # ---------------------------------------------------------------------------------- %package callback-plugins Summary: Openshift and Atomic Enterprise Ansible callback plugins -Requires: %{name} = %{version} +Requires: %{name} = %{version}-%{release} BuildArch: noarch %description callback-plugins @@ -260,7 +265,7 @@ BuildArch: noarch %package -n atomic-openshift-utils Summary: Atomic OpenShift Utilities BuildRequires: python-setuptools -Requires: %{name}-playbooks = %{version} +Requires: %{name}-playbooks = %{version}-%{release} Requires: python-click Requires: python-setuptools Requires: PyYAML @@ -280,6 +285,173 @@ Atomic OpenShift Utilities includes %changelog +* Tue Nov 07 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.197.0 +- Temporarily set master servingInfo.clientCA as client-ca-bundle.crt during + rolling CA redeployment. (abutcher@redhat.com) +- container-engine: ensure /var/lib/containers/ is properly labelled + (gscrivan@redhat.com) +- Moving docker location to share path with system containers. + (kwoodson@redhat.com) +- Retry restarting master controllers (mgugino@redhat.com) +- Bug 1509680- Fix ansible-service-broker registry validations + (fabian@fabianism.us) +- Fix preupgrade authorization objects are in sync (mgugino@redhat.com) +- Bug 1507617- Move etcd into its own service/dc with SSL (fabian@fabianism.us) + +* Mon Nov 06 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.196.0 +- Bug 1509052 - Remove logfile from broker config (david.j.zager@gmail.com) +- Fix github auth validation (mgugino@redhat.com) +- Re-generate lib_openshift (mail@jkroepke.de) +- Remove provisioner restrictions on oc_storageclass (mail@jkroepke.de) + +* Mon Nov 06 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.195.0 +- Bug 1507787- add full path to default asb etcd image (fabian@fabianism.us) + +* Sun Nov 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.194.0 +- Revert "Bootstrap enhancements." 
(ccoleman@redhat.com) + +* Sun Nov 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.193.0 +- management: enterprise users must acknowledge use of beta software + (tbielawa@redhat.com) + +* Sat Nov 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.192.0 +- Bootstrap enhancements. (kwoodson@redhat.com) +- Fix master upgrade version detect and systemd enable (mgugino@redhat.com) +- Correct groupname during upgrade_control_plane play (mgugino@redhat.com) +- openshift_hosted: Add docker-gc (smilner@redhat.com) +- Remove old /etc/yum.repos.d/openshift_additional.repo file. + (abutcher@redhat.com) +- CFME: Use cluster_hostname if cluster_public_hostname isn't available + (tbielawa@redhat.com) +- Use client binary and well defined kubeconfig (sdodson@redhat.com) +- Ensure install and remove are mutually exclusive via + openshift_sanitize_inventory (sdodson@redhat.com) +- Enable SC, ASB, TSB by default (sdodson@redhat.com) +- Using the currently attached pvc for an ES dc if available, otherwise falling + back to current logic (ewolinet@redhat.com) +- Adding elb changes to provision elbs and add to scale group. + (kwoodson@redhat.com) +- Give admin and edit roles permission to patch ServiceInstances and + ServiceBindings (staebler@redhat.com) + +* Fri Nov 03 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.191.0 +- Adding CONFIG_FILE option back. (kwoodson@redhat.com) +- Configurable node config location. (kwoodson@redhat.com) +- Add enterprise prometheus image defaults (sdodson@redhat.com) +- Adding meta/main.yml to allow for Galaxy use of this repo (bedin@redhat.com) + +* Thu Nov 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.190.0 +- check presence of v2 snapshot before the migration proceeds + (jchaloup@redhat.com) +- Remove delegate_to from openshift_facts within the openshift_ca role. + (abutcher@redhat.com) +- Don't use possibly undefined variables in error messages + (tbielawa@redhat.com) +- MTU for bootstrapping should default to openshift_node_sdn_mtu + (ccoleman@redhat.com) +- Retry service account bootstrap kubeconfig creation (ccoleman@redhat.com) +- Docker: make use of new etc/containers/registries.conf optional + (mgugino@redhat.com) +- Add rules to the view ClusterRole for service catalog. (staebler@redhat.com) +- Updating console OPENSHIFT_CONSTANTS flag for TSB (ewolinet@redhat.com) +- GlusterFS: Fix registry storage documentation (jarrpa@redhat.com) +- fix comment and make it visible to end-user (azagayno@redhat.com) +- escape also custom_cors_origins (azagayno@redhat.com) +- add comment on regexp specifics (azagayno@redhat.com) +- escape corsAllowedOrigins regexp strings and anchor them + (azagayno@redhat.com) + +* Wed Nov 01 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.189.0 +- Stating that certificate it is required when doing SSL on ELB. 
+ (kwoodson@redhat.com) +- Ensure GCP image build instance gets cleaned up on teardown + (ccoleman@redhat.com) +- Switch from bind-interfaces to bind-dynamic (sdodson@redhat.com) +- Remove unused osm_controller_lease_ttl (mgugino@redhat.com) +- Delete images located in a family named {{ prefix }}images + (ccoleman@redhat.com) +- Use global IP to indicate node should pick DNS (ccoleman@redhat.com) +- Remove project metadata prefixed with the cluster prefix + (ccoleman@redhat.com) +- Use openshift.node.registry_url instead of oreg_url (ccoleman@redhat.com) +- Allow master node group to wait for stable on GCP (ccoleman@redhat.com) +- GCP cannot use AWS growpart package (ccoleman@redhat.com) +- dnsmasq cache-size dns-forward-max change (pcameron@redhat.com) +- Also require that we match the release (sdodson@redhat.com) +- Add arbitrary firewall port config to master too (sdodson@redhat.com) +- remove master.service during the non-ha to ha upgrade (jchaloup@redhat.com) +- Removing unneeded bootstrap which moved into the product. + (kwoodson@redhat.com) +- Add retry logic to docker auth credentials (mgugino@redhat.com) +- Retry restarting journald (mgugino@redhat.com) +- Modify StorageClass name to standard (piqin@redhat.com) +- Give PV & PVC empty storage class to avoid being assigned default gp2 + (mawong@redhat.com) +- Use oc_project to ensure openshift_provisioners_project present + (mawong@redhat.com) +- Fix yaml formatting (mawong@redhat.com) +- Create default storageclass for cloudprovider openstack (piqin@redhat.com) +- preserve the oo-install ansible_inventory_path value (rmeggins@redhat.com) + +* Tue Oct 31 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.188.0 +- Add dm_thin_pool for gluster use (sdodson@redhat.com) +- Fix broken oc_secret update function (barlik@gmx.com) +- add new clusterNetworks fields to new installs (jtanenba@redhat.com) +- docker: Create openshift_docker_is_node_or_master variable + (smilner@redhat.com) +- Correctly install cockpit (sdodson@redhat.com) +- Glusterfs storage templates for v1.5 added (chinacoolhacker@gmail.com) +- bug 1501599. Omit logging project from overcommit restrictions + (jcantril@redhat.com) +- GlusterFS: Remove image option from heketi command (jarrpa@redhat.com) + +* Mon Oct 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.187.0 +- + +* Sun Oct 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.186.0 +- + +* Sat Oct 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.185.0 +- bug 1506073. Lower cpu request for logging when it exceeds limit + (jcantril@redhat.com) +- Update the name of the service-catalog binary (staebler@redhat.com) +- disk_availability check: include submount storage (lmeyer@redhat.com) + +* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.184.0 +- cri-o: Set max log size to 50 mb (mrunalp@gmail.com) +- cri-o: open port 10010 (gscrivan@redhat.com) +- bug 1435144. 
Remove uneeded upgrade in openshift_logging role + (jcantril@redhat.com) +- Remove inadvertently committed inventory file (rteague@redhat.com) +- crio: restorcon /var/lib/containers (smilner@redhat.com) +- Correct openshift_release regular expression (rteague@redhat.com) +- crio: Add failed_when to overlay check (smilner@redhat.com) +- docker: set credentials when using system container (gscrivan@redhat.com) +- Change dnsmasq to bind-interfaces + except-interfaces (mgugino@redhat.com) +- Fix CA Bundle passed to service-catalog broker for ansible-service-broker + (staebler@redhat.com) +- Renaming csr to bootstrap for consistency. (kwoodson@redhat.com) +- Add master config upgrade hook to upgrade-all plays (mgugino@redhat.com) +- Remove 'Not Started' status from playbook checkpoint (rteague@redhat.com) +- Force include_role to static for loading openshift_facts module + (rteague@redhat.com) +- Make openshift-ansible depend on all subpackages (sdodson@redhat.com) +- Refactor health check playbooks (rteague@redhat.com) + +* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.183.0 +- + +* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.182.0 +- Fixing documentation for the cert_key_path variable name. + (kwoodson@redhat.com) +- Moving removal of unwanted artifacts to image_prep. (kwoodson@redhat.com) +- Ensure journald persistence directories exist (mgugino@redhat.com) +- Fix lint (tbielawa@redhat.com) +- Move add_many_container_providers.yml to playbooks/byo/openshift-management + with a noop task include to load filter plugins. (abutcher@redhat.com) +- Refactor adding multiple container providers (tbielawa@redhat.com) +- Management Cleanup and Provider Integration (tbielawa@redhat.com) + * Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.181.0 - Fix loop_var warnings during logging install (mgugino@redhat.com) - Fix typo and add detailed comments in kuryr (sngchlko@gmail.com) diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index fbab61189..417fb539a 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -65,8 +65,9 @@ openshift_release: # example: v3.7 openshift_pkg_version: # example: -3.7.0 openshift_aws_ssh_key_name: # example: myuser_key openshift_aws_base_ami: # example: ami-12345678 +# These are required when doing SSL on the ELBs openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt' -openshift_aws_iam_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key' +openshift_aws_iam_cert_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key' ``` If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`. diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml index 5b4a6a1e8..fae30eb0a 100644 --- a/playbooks/aws/openshift-cluster/build_ami.yml +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -28,6 +28,8 @@ set_fact: ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}" openshift_node_bootstrap: True + openshift_node_image_prep_packages: + - cloud-utils-growpart # This is the part that installs all of the software and configs for the instance # to become a node. 
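The build_ami.yml hunk above introduces openshift_node_image_prep_packages so the image-prep plays can layer extra packages onto the AMI build instance; cloud-utils-growpart is listed so the root filesystem can be grown when instances later boot from the resulting image. A minimal sketch of a task that could consume such a list during image prep, assuming the variable is simply looped over (this task is illustrative, not the role's actual implementation):

---
# Illustrative sketch only: install whatever packages the inventory lists
# for image prep. The real consuming task lives in the node prep roles.
- name: Install image prep packages
  package:
    name: "{{ item }}"
    state: present
  with_items: "{{ openshift_node_image_prep_packages | default([]) }}"
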
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example index aa91363ae..1491fb868 100644 --- a/playbooks/aws/provisioning_vars.yml.example +++ b/playbooks/aws/provisioning_vars.yml.example @@ -116,5 +116,5 @@ openshift_aws_base_ami: # ami-12345678 # custom certificates are required for the ELB openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt' -openshift_aws_iam_key_path: # '/path/to/wildcard.<clusterid>.example.com.key' -#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt' +openshift_aws_iam_cert_key_path: # '/path/to/wildcard.<clusterid>.example.com.key' +openshift_aws_iam_cert_chain_path: # '/path/to/cert.ca.crt' diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index 255b0dbf7..f53d34145 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -42,3 +42,7 @@ - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml when: openshift_hosted_manage_registry | default(true) | bool + +- include: ../../common/openshift-master/revert-client-ca.yml + +- include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-management/add_container_provider.yml b/playbooks/byo/openshift-management/add_container_provider.yml new file mode 100644 index 000000000..3378b5abd --- /dev/null +++ b/playbooks/byo/openshift-management/add_container_provider.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/evaluate_groups.yml + +- include: ../../common/openshift-management/add_container_provider.yml diff --git a/playbooks/byo/openshift-management/add_many_container_providers.yml b/playbooks/byo/openshift-management/add_many_container_providers.yml new file mode 100644 index 000000000..62fdb11c5 --- /dev/null +++ b/playbooks/byo/openshift-management/add_many_container_providers.yml @@ -0,0 +1,36 @@ +--- +- hosts: localhost + tasks: + - name: Ensure the container provider configuration is defined + assert: + that: container_providers_config is defined + msg: | + Error: Must provide providers config path. Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command + + - name: Include providers/management configuration + include_vars: + file: "{{ container_providers_config }}" + + - name: Ensure this cluster is a container provider + uri: + url: "https://{{ management_server['hostname'] }}/api/providers" + body_format: json + method: POST + user: "{{ management_server['user'] }}" + password: "{{ management_server['password'] }}" + validate_certs: no + # Docs on formatting the BODY of the POST request: + # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations + body: "{{ item }}" + failed_when: false + with_items: "{{ container_providers }}" + register: results + + # Include openshift_management for access to filter_plugins. 
+ - include_role: + name: openshift_management + tasks_from: noop + + - name: print each result + debug: + msg: "{{ results.results | oo_filter_container_providers }}" diff --git a/playbooks/byo/openshift-management/config.yml b/playbooks/byo/openshift-management/config.yml index e8795ef85..209c66502 100644 --- a/playbooks/byo/openshift-management/config.yml +++ b/playbooks/byo/openshift-management/config.yml @@ -1,6 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml -- include: ../../common/openshift-cluster/evaluate_groups.yml +- include: ../../common/openshift-cluster/std_include.yml - include: ../../common/openshift-management/config.yml diff --git a/playbooks/byo/openshift-management/roles b/playbooks/byo/openshift-management/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-management/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml index a1fb1cdc4..e95c1c88a 100644 --- a/playbooks/byo/openshift-management/uninstall.yml +++ b/playbooks/byo/openshift-management/uninstall.yml @@ -1,4 +1,2 @@ --- -# - include: ../openshift-cluster/initialize_groups.yml - - include: ../../common/openshift-management/uninstall.yml diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml index dfcef8435..d0deaeb65 100644 --- a/playbooks/common/openshift-checks/adhoc.yml +++ b/playbooks/common/openshift-checks/adhoc.yml @@ -1,12 +1,13 @@ --- -- name: OpenShift health checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: adhoc post_tasks: - - name: Run health checks + - name: Run health checks (adhoc) action: openshift_health_check args: checks: '{{ openshift_checks | default([]) }}' diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml index 21ea785ef..d0921b9d3 100644 --- a/playbooks/common/openshift-checks/health.yml +++ b/playbooks/common/openshift-checks/health.yml @@ -1,11 +1,13 @@ --- -- name: Run OpenShift health checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: health post_tasks: - - action: openshift_health_check + - name: Run health checks (@health) + action: openshift_health_check args: checks: ['@health'] diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml new file mode 100644 index 000000000..6701a2e15 --- /dev/null +++ b/playbooks/common/openshift-checks/install.yml @@ -0,0 +1,47 @@ +--- +- name: Health Check Checkpoint Start + hosts: oo_all_hosts + gather_facts: false + tasks: + - name: Set Health Check 'In Progress' + set_stats: + data: + installer_phase_health: "In Progress" + aggregate: false + +- name: OpenShift Health Checks + hosts: oo_all_hosts + any_errors_fatal: true + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: install + post_tasks: + - name: Run health checks (install) - EL + when: ansible_distribution != "Fedora" + action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - package_availability + - package_version + - docker_image_availability + - docker_storage + + - name: Run health checks (install) - Fedora + when: ansible_distribution == "Fedora" + action: openshift_health_check + args: + checks: + - docker_image_availability + +- name: Health Check Checkpoint End + hosts: oo_all_hosts + gather_facts: false + tasks: + - name: Set Health Check 'Complete' + set_stats: + data: + installer_phase_health: "Complete" + aggregate: false diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml index 88e6f9120..32449d4e4 100644 --- a/playbooks/common/openshift-checks/pre-install.yml +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -1,11 +1,13 @@ --- -- name: run OpenShift pre-install checks +- name: OpenShift Health Checks hosts: oo_all_hosts + roles: - openshift_health_checker vars: - r_openshift_health_checker_playbook_context: pre-install post_tasks: - - action: openshift_health_check + - name: Run health checks (@preflight) + action: openshift_health_check args: checks: ['@preflight'] 
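These check plays all feed the openshift_health_check action with a list of check names: adhoc.yml reads the caller-supplied openshift_checks variable, health.yml and pre-install.yml run the @health and @preflight groups, and the new install.yml pins the install-time set. A hedged sketch of inventory variables an operator might set to drive or relax the checks; openshift_checks is taken directly from the adhoc play above, while openshift_disable_check is assumed from the health-checker documentation rather than this diff:

---
# Illustrative group_vars snippet, not part of this change.
# Checks to run when invoking the adhoc checks play:
openshift_checks:
  - docker_storage
  - memory_availability
# Comma-separated checks to skip during install
# (variable name assumed from upstream docs, not defined in this diff):
openshift_disable_check: disk_availability,docker_image_availability
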
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 395eb51f1..3b4d6f9a6 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,31 +1,5 @@ --- -# TODO: refactor this into its own include -# and pass a variable for ctx -- name: Verify Requirements - hosts: oo_all_hosts - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: install - post_tasks: - - - name: Verify Requirements - EL - when: ansible_distribution != "Fedora" - action: openshift_health_check - args: - checks: - - disk_availability - - memory_availability - - package_availability - - package_version - - docker_image_availability - - docker_storage - - name: Verify Requirements - Fedora - when: ansible_distribution == "Fedora" - action: openshift_health_check - args: - checks: - - docker_image_availability +- include: ../openshift-checks/install.yml - include: ../openshift-etcd/config.yml @@ -53,7 +27,7 @@ when: openshift_logging_install_logging | default(false) | bool - include: service_catalog.yml - when: openshift_enable_service_catalog | default(false) | bool + when: openshift_enable_service_catalog | default(true) | bool - include: ../openshift-management/config.yml when: openshift_management_install_management | default(false) | bool diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index 0f563adb7..91223d368 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -10,6 +10,7 @@ - name: load openshift_facts module include_role: name: openshift_facts + static: yes # TODO: Should this role be refactored into health_checks?? 
- name: Run openshift_sanitize_inventory to set variables diff --git a/playbooks/common/openshift-cluster/install_docker_gc.yml b/playbooks/common/openshift-cluster/install_docker_gc.yml new file mode 100644 index 000000000..1e3dfee07 --- /dev/null +++ b/playbooks/common/openshift-cluster/install_docker_gc.yml @@ -0,0 +1,7 @@ +--- +- name: Install docker gc + hosts: oo_first_master + gather_facts: false + tasks: + - include_role: + name: openshift_docker_gc diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml index 4b4f19690..62fe0dd60 100644 --- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml +++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml @@ -3,4 +3,4 @@ hosts: oo_first_master roles: - role: openshift_default_storage_class - when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') + when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack') diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index c1536eb36..281ccce2e 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -24,6 +24,11 @@ - include: openshift_prometheus.yml when: openshift_hosted_prometheus_deploy | default(False) | bool +- include: install_docker_gc.yml + when: + - openshift_use_crio | default(False) | bool + - openshift_crio_enable_docker_gc | default(False) | bool + - name: Hosted Install Checkpoint End hosts: oo_all_hosts gather_facts: false diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 2068ed199..e22c8cbdb 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -43,11 +43,6 @@ when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt' - modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: servingInfo.clientCA - yaml_value: ca.crt - when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: etcdClientInfo.ca yaml_value: ca-bundle.crt when: @@ -67,6 +62,13 @@ when: - groups.oo_etcd_to_config | default([]) | length == 0 - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt' + # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate. 
+ # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: client-ca-bundle.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt' - name: Copy current OpenShift CA to legacy directory hosts: oo_masters_to_config @@ -155,6 +157,7 @@ - ca.key - ca-bundle.crt - ca.serial.txt + - client-ca-bundle.crt delegate_to: "{{ openshift_ca_host }}" run_once: true changed_when: false @@ -173,6 +176,7 @@ - ca.key - ca-bundle.crt - ca.serial.txt + - client-ca-bundle.crt - name: Update master client kubeconfig CA data kubeclient_ca: client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml index 30e719d8f..bda245fe1 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -112,6 +112,8 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_5/master_config_upgrade.yml" - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 920dc2ffc..dd109cfa9 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -116,6 +116,8 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_6/master_config_upgrade.yml" - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index 81f6dc8a4..f4862e321 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -119,9 +119,9 @@ tasks: - include: ../cleanup_unused_images.yml -#TODO: Why doesn't this compose using ./upgrade_control_plane rather than -# ../upgrade_control_plane? 
- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index d5a8379d7..b905d6d86 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -129,7 +129,7 @@ # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode - hosts: oo_etcd_to_config + hosts: oo_masters_to_config gather_facts: no tasks: - name: Stop {{ openshift.common.service_type }}-master-controllers diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index 8e4f99c91..022b4b4fb 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -15,7 +15,7 @@ - name: Confirm OpenShift authorization objects are in sync command: > {{ openshift.common.client_binary }} adm migrate authorization - when: openshift_version | version_compare('3.7','<') + when: openshift_upgrade_target | version_compare('3.8','<') changed_when: false register: l_oc_result until: l_oc_result.rc == 0 diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 80cda9e21..c2ae5f313 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -17,6 +17,11 @@ tasks_from: firewall.yml when: - openshift_storage_glusterfs_is_native | default(True) | bool + - include_role: + name: openshift_storage_glusterfs + tasks_from: kernel_modules.yml + when: + - openshift_storage_glusterfs_is_native | default(True) | bool - name: Open firewall ports for GlusterFS registry nodes hosts: glusterfs_registry @@ -26,6 +31,11 @@ tasks_from: firewall.yml when: - openshift_storage_glusterfs_registry_is_native | default(True) | bool + - include_role: + name: openshift_storage_glusterfs + tasks_from: kernel_modules.yml + when: + - openshift_storage_glusterfs_registry_is_native | default(True) | bool - name: Configure GlusterFS hosts: oo_first_master diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml new file mode 100644 index 000000000..facb3a5b9 --- /dev/null +++ b/playbooks/common/openshift-management/add_container_provider.yml @@ -0,0 +1,8 @@ +--- +- name: Add Container Provider to Management + hosts: oo_first_master + tasks: + - name: Run the Management Integration Tasks + include_role: + name: openshift_management + tasks_from: add_container_provider diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml index 698d93405..9f35cc276 100644 --- a/playbooks/common/openshift-management/uninstall.yml +++ b/playbooks/common/openshift-management/uninstall.yml @@ -1,6 +1,6 @@ --- - name: Uninstall CFME - hosts: masters + hosts: masters[0] tasks: - name: Run the CFME Uninstall Role Tasks include_role: diff --git a/playbooks/common/openshift-master/additional_config.yml 
b/playbooks/common/openshift-master/additional_config.yml index e1472ce38..350557f19 100644 --- a/playbooks/common/openshift-master/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml @@ -28,7 +28,7 @@ when: openshift_use_manageiq | default(true) | bool - role: cockpit when: - - openshift.common.is_atomic + - not openshift.common.is_atomic | bool - deployment_type == 'openshift-enterprise' - osm_use_cockpit is undefined or osm_use_cockpit | bool - openshift.common.deployment_subtype != 'registry' diff --git a/playbooks/common/openshift-master/revert-client-ca.yml b/playbooks/common/openshift-master/revert-client-ca.yml new file mode 100644 index 000000000..9ae23bf5b --- /dev/null +++ b/playbooks/common/openshift-master/revert-client-ca.yml @@ -0,0 +1,17 @@ +--- +- name: Set servingInfo.clientCA = ca.crt in master config + hosts: oo_masters_to_config + tasks: + - name: Read master config + slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_output + + # servingInfo.clientCA may be set as the client-ca-bundle.crt from + # CA redeployment and this task reverts that change. + - name: Set servingInfo.clientCA = ca.crt in master config + modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index f4dc9df8a..05b37d59f 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -22,8 +22,13 @@ - name: restart master api service: name={{ openshift.common.service_type }}-master-controllers state=restarted notify: verify api server + # We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers - service: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 - name: verify api server command: > curl --silent --tlsv1.2 diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml index 560eea785..df3ea27b4 100644 --- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml +++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml @@ -179,8 +179,13 @@ - yedit_output.changed - openshift.master.cluster_method == 'native' +# We retry the controllers because the API may not be 100% initialized yet. 
- name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 when: - yedit_output.changed - openshift.master.cluster_method == 'native' diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml new file mode 100644 index 000000000..38753d0af --- /dev/null +++ b/playbooks/common/openshift-node/clean_image.yml @@ -0,0 +1,10 @@ +--- +- name: Configure nodes + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + tasks: + - name: Remove any ansible facts created during AMI creation + file: + path: "/etc/ansible/facts.d/{{ item }}" + state: absent + with_items: + - openshift.fact diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml index 00d167c22..30651a1df 100644 --- a/playbooks/common/openshift-node/image_prep.yml +++ b/playbooks/common/openshift-node/image_prep.yml @@ -19,3 +19,6 @@ - name: Re-enable excluders include: enable_excluders.yml + +- name: Remove any undesired artifacts from build + include: clean_image.yml diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml index dc05b03b5..34110ca99 100644 --- a/roles/ansible_service_broker/defaults/main.yml +++ b/roles/ansible_service_broker/defaults/main.yml @@ -14,3 +14,4 @@ ansible_service_broker_launch_apb_on_bind: false ansible_service_broker_image_pull_policy: IfNotPresent ansible_service_broker_sandbox_role: edit ansible_service_broker_auto_escalate: false +ansible_service_broker_local_registry_whitelist: [] diff --git a/roles/ansible_service_broker/tasks/generate_certs.yml b/roles/ansible_service_broker/tasks/generate_certs.yml new file mode 100644 index 000000000..85e67e00c --- /dev/null +++ b/roles/ansible_service_broker/tasks/generate_certs.yml @@ -0,0 +1,35 @@ +--- + +- when: ansible_service_broker_certs_dir is undefined + block: + - name: Create ansible-service-broker cert directory + file: + path: "{{ openshift.common.config_base }}/ansible-service-broker" + state: directory + mode: 0755 + check_mode: no + + - name: Create self signing ca cert + command: 'openssl req -nodes -x509 -newkey rsa:4096 -keyout {{ openshift.common.config_base }}/ansible-service-broker/key.pem -out {{ openshift.common.config_base }}/ansible-service-broker/cert.pem -days 365 -subj "/CN=asb-etcd.openshift-ansible-service-broker.svc"' + args: + creates: '{{ openshift.common.config_base }}/ansible-service-broker/cert.pem' + + - name: Create self signed client cert + command: '{{ item.cmd }}' + args: + creates: '{{ item.creates }}' + with_items: + - cmd: openssl genrsa -out {{ openshift.common.config_base }}/ansible-service-broker/client.key 2048 + creates: '{{ openshift.common.config_base }}/ansible-service-broker/client.key' + - cmd: 'openssl req -new -key {{ openshift.common.config_base }}/ansible-service-broker/client.key -out {{ openshift.common.config_base }}/ansible-service-broker/client.csr -subj "/CN=client"' + creates: '{{ openshift.common.config_base }}/ansible-service-broker/client.csr' + - cmd: openssl x509 -req -in {{ openshift.common.config_base }}/ansible-service-broker/client.csr -CA {{ openshift.common.config_base }}/ansible-service-broker/cert.pem -CAkey {{ openshift.common.config_base }}/ansible-service-broker/key.pem -CAcreateserial -out {{ 
openshift.common.config_base }}/ansible-service-broker/client.pem -days 1024 + creates: '{{ openshift.common.config_base }}/ansible-service-broker/client.pem' + + - set_fact: + ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/ansible-service-broker" + +- set_fact: + etcd_ca_cert: "{{ lookup('file', '{{ ansible_service_broker_certs_dir }}/cert.pem') }}" + etcd_client_cert: "{{ lookup('file', '{{ ansible_service_broker_certs_dir }}/client.pem') }}" + etcd_client_key: "{{ lookup('file', '{{ ansible_service_broker_certs_dir }}/client.key') }}" diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index 66c3d9cc4..90a4418fb 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -32,6 +32,7 @@ - include: validate_facts.yml +- include: generate_certs.yml # Deployment of ansible-service-broker starts here - name: create openshift-ansible-service-broker project @@ -68,6 +69,9 @@ - apiGroups: ["authentication.k8s.io"] resources: ["tokenreviews"] verbs: ["create"] + - apiGroups: ["image.openshift.io", ""] + resources: ["images"] + verbs: ["get", "list"] - name: Create asb-access cluster role oc_clusterrole: @@ -116,6 +120,24 @@ kubernetes.io/service-account.name: asb-client type: kubernetes.io/service-account-token +- name: Create etcd-auth secret + oc_secret: + name: etcd-auth-secret + namespace: openshift-ansible-service-broker + contents: + - path: ca.crt + data: '{{ etcd_ca_cert }}' + +- name: Create broker-etcd-auth secret + oc_secret: + name: broker-etcd-auth-secret + namespace: openshift-ansible-service-broker + contents: + - path: client.crt + data: '{{ etcd_client_cert }}' + - path: client.key + data: '{{ etcd_client_key }}' + - oc_secret: state: list namespace: openshift-ansible-service-broker @@ -123,7 +145,7 @@ register: asb_client_secret - set_fact: - service_ca_crt: asb_client_secret.results.results.0.data['service-ca.crt'] + service_ca_crt: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] }}" # Using oc_obj because oc_service doesn't seem to allow annotations # TODO: Extend oc_service to allow annotations @@ -156,6 +178,34 @@ app: openshift-ansible-service-broker service: asb +- name: create asb-etcd service + oc_obj: + name: asb-etcd + namespace: openshift-ansible-service-broker + state: present + kind: Service + content: + path: /tmp/asbetcdsvcout + data: + apiVersion: v1 + kind: Service + metadata: + name: asb-etcd + labels: + app: etcd + service: asb-etcd + annotations: + service.alpha.openshift.io/serving-cert-secret-name: etcd-tls + spec: + ports: + - name: port-2379 + port: 2379 + targetPort: 2379 + protocol: TCP + selector: + app: etcd + service: asb-etcd + - name: create route for ansible-service-broker service oc_route: name: asb-1338 @@ -227,6 +277,8 @@ mountPath: /etc/ansible-service-broker - name: asb-tls mountPath: /etc/tls/private + - name: asb-etcd-auth + mountPath: /var/run/asb-etcd-auth ports: - containerPort: 1338 protocol: TCP @@ -249,7 +301,50 @@ scheme: HTTPS initialDelaySeconds: 15 timeoutSeconds: 1 + volumes: + - name: config-volume + configMap: + name: broker-config + items: + - key: broker-config + path: config.yaml + - name: asb-tls + secret: + secretName: asb-tls + - name: asb-etcd-auth + secret: + secretName: broker-etcd-auth-secret +- name: Create asb-etcd deployment config + oc_obj: + name: etcd + namespace: openshift-ansible-service-broker + state: present + kind: DeploymentConfig + content: + path: 
/tmp/dcout + data: + apiVersion: v1 + kind: DeploymentConfig + metadata: + name: asb-etcd + labels: + app: etcd + service: asb-etcd + spec: + replicas: 1 + selector: + app: etcd + strategy: + type: Rolling + template: + metadata: + labels: + app: etcd + service: asb-etcd + spec: + serviceAccount: asb + containers: - image: "{{ ansible_service_broker_etcd_image }}" name: etcd imagePullPolicy: IfNotPresent @@ -258,8 +353,12 @@ args: - "{{ ansible_service_broker_etcd_image_etcd_path }}" - "--data-dir=/data" - - "--listen-client-urls=http://0.0.0.0:2379" - - "--advertise-client-urls=http://0.0.0.0:2379" + - "--listen-client-urls=https://0.0.0.0:2379" + - "--advertise-client-urls=https://0.0.0.0:2379" + - "--client-cert-auth" + - "--trusted-ca-file=/var/run/etcd-auth-secret/ca.crt" + - "--cert-file=/etc/tls/private/tls.crt" + - "--key-file=/etc/tls/private/tls.key" ports: - containerPort: 2379 protocol: TCP @@ -267,21 +366,22 @@ - name: ETCDCTL_API value: "3" volumeMounts: - - mountPath: /data - name: etcd + - name: etcd + mountPath: /data + - name: etcd-tls + mountPath: /etc/tls/private + - name: etcd-auth + mountPath: /var/run/etcd-auth-secret volumes: - name: etcd persistentVolumeClaim: claimName: etcd - - name: config-volume - configMap: - name: broker-config - items: - - key: broker-config - path: config.yaml - - name: asb-tls + - name: etcd-tls secret: - secretName: asb-tls + secretName: etcd-tls + - name: etcd-auth + secret: + secretName: etcd-auth-secret # TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following: @@ -307,16 +407,19 @@ - type: {{ ansible_service_broker_registry_type }} name: {{ ansible_service_broker_registry_name }} url: {{ ansible_service_broker_registry_url }} - user: {{ ansible_service_broker_registry_user }} - pass: {{ ansible_service_broker_registry_password }} org: {{ ansible_service_broker_registry_organization }} tag: {{ ansible_service_broker_registry_tag }} white_list: {{ ansible_service_broker_registry_whitelist }} + - type: local_registry + namespaces: ['openshift'] + white_list: {{ ansible_service_broker_local_registry_whitelist }} dao: - etcd_host: 0.0.0.0 + etcd_host: asb-etcd.openshift-ansible-service-broker.svc etcd_port: 2379 + etcd_ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + etcd_client_cert: /var/run/asb-etcd-auth/client.crt + etcd_client_key: /var/run/asb-etcd-auth/client.key log: - logfile: /var/log/ansible-service-broker/asb.log stdout: true level: {{ ansible_service_broker_log_level }} color: true @@ -340,6 +443,15 @@ - type: basic enabled: false +- oc_secret: + name: asb-registry-auth + namespace: openshift-ansible-service-broker + state: present + contents: + - path: username + data: "{{ ansible_service_broker_registry_user }}" + - path: password + data: "{{ ansible_service_broker_registry_password }}" - name: Create the Broker resource in the catalog oc_obj: diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml index d8695bd3a..2ed156728 100644 --- a/roles/ansible_service_broker/tasks/main.yml +++ b/roles/ansible_service_broker/tasks/main.yml @@ -2,7 +2,7 @@ # do any asserts here - include: install.yml - when: ansible_service_broker_install | default(false) | bool + when: ansible_service_broker_install | default(true) | bool - include: remove.yml when: ansible_service_broker_remove | default(false) | bool diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml index 
51b86fb26..a1ac740e0 100644 --- a/roles/ansible_service_broker/tasks/remove.yml +++ b/roles/ansible_service_broker/tasks/remove.yml @@ -46,18 +46,42 @@ resource_name: asb-access user: "system:serviceaccount:openshift-ansible-service-broker:asb-client" +- name: remove asb-registry auth secret + oc_secret: + state: absent + name: asb-registry-auth + namespace: openshift-ansible-service-broker + - name: remove asb-client token secret oc_secret: state: absent name: asb-client namespace: openshift-ansible-service-broker +- name: Remove etcd-auth secret + oc_secret: + state: absent + name: etcd-auth-secret + namespace: openshift-ansible-service-broker + +- name: Remove broker-etcd-auth secret + oc_secret: + state: absent + name: broker-etcd-auth-secret + namespace: openshift-ansible-service-broker + - name: remove ansible-service-broker service oc_service: name: asb namespace: openshift-ansible-service-broker state: absent +- name: remove asb-etcd service + oc_service: + state: absent + name: asb-etcd + namespace: openshift-ansible-service-broker + - name: remove etcd service oc_service: name: etcd @@ -83,6 +107,14 @@ kind: DeploymentConfig state: absent +- name: remove Ansible Service Broker etcd deployment config + oc_obj: + name: asb-etcd + namespace: openshift-ansible-service-broker + kind: DeploymentConfig + state: absent + + - name: remove secret for broker auth oc_obj: name: asb-client diff --git a/roles/ansible_service_broker/tasks/validate_facts.yml b/roles/ansible_service_broker/tasks/validate_facts.yml index 604d24e1d..a2345551b 100644 --- a/roles/ansible_service_broker/tasks/validate_facts.yml +++ b/roles/ansible_service_broker/tasks/validate_facts.yml @@ -1,11 +1,9 @@ --- - name: validate Dockerhub registry settings - fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_user. 
ansible_service_broker_registry_password, and ansible_service_broker_registry_organization parameters" + fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_organization" when: - ansible_service_broker_registry_type == 'dockerhub' - - not (ansible_service_broker_registry_user and - ansible_service_broker_registry_password and - ansible_service_broker_registry_organization) + - not ansible_service_broker_registry_organization - name: validate RHCC registry settings diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml index 8438e993f..248e0363d 100644 --- a/roles/ansible_service_broker/vars/default_images.yml +++ b/roles/ansible_service_broker/vars/default_images.yml @@ -12,6 +12,6 @@ __ansible_service_broker_registry_name: dh __ansible_service_broker_registry_url: null __ansible_service_broker_registry_user: null __ansible_service_broker_registry_password: null -__ansible_service_broker_registry_organization: null +__ansible_service_broker_registry_organization: ansibleplaybookbundle __ansible_service_broker_registry_tag: latest __ansible_service_broker_registry_whitelist: [] diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml index fc58b4fd8..c203f596e 100644 --- a/roles/ansible_service_broker/vars/openshift-enterprise.yml +++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml @@ -3,7 +3,7 @@ __ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose- __ansible_service_broker_image_tag: v3.7 -__ansible_service_broker_etcd_image_prefix: rhel7/ +__ansible_service_broker_etcd_image_prefix: registry.access.redhat.com/rhel7/ __ansible_service_broker_etcd_image_tag: latest __ansible_service_broker_etcd_image_etcd_path: /bin/etcd diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index 1c830cb4e..c086c28df 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -20,4 +20,19 @@ l2_docker_additional_registries: "{% if openshift_docker_additional_registries i l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}" l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}" +openshift_docker_use_etc_containers: False containers_registries_conf_path: /etc/containers/registries.conf + +r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}" +r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" + +r_crio_os_firewall_deny: [] +r_crio_os_firewall_allow: +- service: crio + port: 10010/tcp + + +openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}" + +docker_alt_storage_path: /var/lib/containers/docker 
+docker_default_storage_path: /var/lib/docker diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml index b773a417c..62b8a2eb5 100644 --- a/roles/docker/meta/main.yml +++ b/roles/docker/meta/main.yml @@ -11,3 +11,4 @@ galaxy_info: - 7 dependencies: - role: lib_openshift +- role: lib_os_firewall diff --git a/roles/docker/tasks/crio_firewall.yml b/roles/docker/tasks/crio_firewall.yml new file mode 100644 index 000000000..fbd1ff515 --- /dev/null +++ b/roles/docker/tasks/crio_firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_crio_firewall_enabled | bool and not r_crio_use_firewalld | bool + block: + - name: Add iptables allow rules + os_firewall_manage_iptables: + name: "{{ item.service }}" + action: add + protocol: "{{ item.port.split('/')[1] }}" + port: "{{ item.port.split('/')[0] }}" + when: item.cond | default(True) + with_items: "{{ r_crio_os_firewall_allow }}" + + - name: Remove iptables rules + os_firewall_manage_iptables: + name: "{{ item.service }}" + action: remove + protocol: "{{ item.port.split('/')[1] }}" + port: "{{ item.port.split('/')[0] }}" + when: item.cond | default(True) + with_items: "{{ r_crio_os_firewall_deny }}" + +- when: r_crio_firewall_enabled | bool and r_crio_use_firewalld | bool + block: + - name: Add firewalld allow rules + firewalld: + port: "{{ item.port }}" + permanent: true + immediate: true + state: enabled + when: item.cond | default(True) + with_items: "{{ r_crio_os_firewall_allow }}" + + - name: Remove firewalld allow rules + firewalld: + port: "{{ item.port }}" + permanent: true + immediate: true + state: disabled + when: item.cond | default(True) + with_items: "{{ r_crio_os_firewall_deny }}" diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 5ea73568a..3c814d8d8 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -25,6 +25,15 @@ - not l_use_system_container - not l_use_crio_only +- name: Ensure /var/lib/containers exists + file: + path: /var/lib/containers + state: directory + +- name: Fix SELinux Permissions on /var/lib/containers + command: "restorecon -R /var/lib/containers/" + changed_when: false + - name: Use System Container Docker if Requested include: systemcontainer_docker.yml when: @@ -35,4 +44,49 @@ include: systemcontainer_crio.yml when: - l_use_crio - - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] + - openshift_docker_is_node_or_master | bool + +- name: stat the docker data dir + stat: + path: "{{ docker_default_storage_path }}" + register: dockerstat + +- when: + - l_use_crio + - dockerstat.stat.islink is defined and not (dockerstat.stat.islink | bool) + block: + - name: stop the current running docker + systemd: + state: stopped + name: "{{ openshift.docker.service_name }}" + + - name: "Ensure {{ docker_alt_storage_path }} exists" + file: + path: "{{ docker_alt_storage_path }}" + state: directory + + - name: "Set the selinux context on {{ docker_alt_storage_path }}" + command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" + register: results + failed_when: + - results.rc == 1 + - "'already exists' not in results.stderr" + + - name: "restorecon the {{ docker_alt_storage_path }}" + command: "restorecon -r {{ docker_alt_storage_path }}" + + - name: Remove the old docker location + file: + state: absent + path: "{{ docker_default_storage_path }}" + + - name: Setup the link + file: + state: link + src: "{{ docker_alt_storage_path }}" + path: "{{ docker_default_storage_path }}" 
+ + - name: start docker + systemd: + state: started + name: "{{ openshift.docker.service_name }}" diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index b16413f72..c1aedf879 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -81,6 +81,7 @@ template: dest: "{{ containers_registries_conf_path }}" src: registries.conf + when: openshift_docker_use_etc_containers | bool notify: - restart docker diff --git a/roles/docker/tasks/registry_auth.yml b/roles/docker/tasks/registry_auth.yml index 65ed60efa..d05b7f2b8 100644 --- a/roles/docker/tasks/registry_auth.yml +++ b/roles/docker/tasks/registry_auth.yml @@ -7,6 +7,10 @@ - name: Create credentials for docker cli registry auth command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}" + register: openshift_docker_credentials_create_res + retries: 3 + delay: 5 + until: openshift_docker_credentials_create_res.rc == 0 when: - oreg_auth_user is defined - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index 13bbd359e..1e2d64293 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -3,16 +3,16 @@ # TODO: Much of this file is shared with container engine tasks - set_fact: l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" - when: l2_docker_insecure_registries + when: l2_docker_insecure_registries | bool - set_fact: l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" - when: l2_docker_additional_registries + when: l2_docker_additional_registries | bool - set_fact: l_crio_registries: "{{ ['docker.io'] }}" - when: not l2_docker_additional_registries + when: not (l2_docker_additional_registries | bool) - set_fact: l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" - when: l2_docker_additional_registries + when: l2_docker_additional_registries | bool - set_fact: l_openshift_image_tag: "{{ openshift_image_tag | string }}" @@ -62,7 +62,7 @@ shell: lsmod | grep overlay register: l_has_overlay_in_kernel ignore_errors: yes - + failed_when: false - when: l_has_overlay_in_kernel.rc != 0 block: @@ -161,6 +161,10 @@ path: /etc/cni/net.d/ state: directory +- name: setup firewall for CRI-O + include: crio_firewall.yml + static: yes + - name: Configure the CNI network template: dest: /etc/cni/net.d/openshift-sdn.conf diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml index 726e8ada7..aa3b35ddd 100644 --- a/roles/docker/tasks/systemcontainer_docker.yml +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -173,4 +173,6 @@ - set_fact: docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}" +- include: registry_auth.yml + - meta: flush_handlers diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2 index b715c2ffa..93014a80d 100644 --- a/roles/docker/templates/crio.conf.j2 +++ b/roles/docker/templates/crio.conf.j2 @@ -108,7 +108,7 @@ pids_limit = 1024 # log_size_max is the max limit for the container log size in bytes. # Negative values indicate that no limit is imposed. 
-log_size_max = -1 +log_size_max = 52428800 # The "crio.image" table contains settings pertaining to the # management of OCI images. diff --git a/roles/etcd/tasks/migration/check.yml b/roles/etcd/tasks/migration/check.yml index 0804d9e1c..5c45e5ae1 100644 --- a/roles/etcd/tasks/migration/check.yml +++ b/roles/etcd/tasks/migration/check.yml @@ -3,6 +3,17 @@ # Check the cluster is healthy - include: check_cluster_health.yml +# Check if there is at least one v2 snapshot +- name: Check if there is at least one v2 snapshot + find: + paths: "{{ etcd_data_dir }}/member/snap" + patterns: '*.snap' + register: snapshots_result + +- fail: + msg: "Before the migration can proceed the etcd member must write down at least one snapshot under {{ etcd_data_dir }}/member/snap directory." + when: snapshots_result.matched | int == 0 + # Check if the member has v3 data already # Run the migration only if the data are v2 - name: Check if there are any v3 data diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index 55c44bb84..b17358882 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -72,6 +72,7 @@ class CallbackModule(CallbackBase): # Set the order of the installer phases installer_phases = [ 'installer_phase_initialize', + 'installer_phase_health', 'installer_phase_etcd', 'installer_phase_nfs', 'installer_phase_loadbalancer', @@ -93,6 +94,10 @@ class CallbackModule(CallbackBase): 'title': 'Initialization', 'playbook': '' }, + 'installer_phase_health': { + 'title': 'Health Check', + 'playbook': 'playbooks/byo/openshift-checks/pre-install.yml' + }, 'installer_phase_etcd': { 'title': 'etcd Install', 'playbook': 'playbooks/byo/openshift-etcd/config.yml' @@ -166,11 +171,6 @@ class CallbackModule(CallbackBase): self._display.display( '\tThis phase can be restarted by running: {}'.format( phase_attributes[phase]['playbook'])) - else: - # Phase was not found in custom stats - self._display.display( - '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'), - color=C.COLOR_SKIP) self._display.display("", screen_only=True) diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 0614f359d..62bda33ad 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -1633,7 +1633,7 @@ class OCSecret(OpenShiftCLI): This receives a list of file names and converts it into a secret. The secret is then written to disk and passed into the `oc replace` command. 
''' - secret = self.prep_secret(files, force) + secret = self.prep_secret(files, force=force) if secret['returncode'] != 0: return secret diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index e88f3ae8d..7e7d0fa60 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ b/roles/lib_openshift/library/oc_storageclass.py @@ -1664,7 +1664,7 @@ def main(): name=dict(default=None, type='str'), annotations=dict(default=None, type='dict'), parameters=dict(default=None, type='dict'), - provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']), + provisioner=dict(required=True, type='str'), api_version=dict(default='v1', type='str'), default_storage_class=dict(default="false", type='str'), ), diff --git a/roles/lib_openshift/src/ansible/oc_storageclass.py b/roles/lib_openshift/src/ansible/oc_storageclass.py index e9f3ebbd3..a8f371661 100644 --- a/roles/lib_openshift/src/ansible/oc_storageclass.py +++ b/roles/lib_openshift/src/ansible/oc_storageclass.py @@ -14,7 +14,7 @@ def main(): name=dict(default=None, type='str'), annotations=dict(default=None, type='dict'), parameters=dict(default=None, type='dict'), - provisioner=dict(required=True, type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']), + provisioner=dict(required=True, type='str'), api_version=dict(default='v1', type='str'), default_storage_class=dict(default="false", type='str'), ), diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py index 5322d6241..89e70b6b2 100644 --- a/roles/lib_openshift/src/class/oc_secret.py +++ b/roles/lib_openshift/src/class/oc_secret.py @@ -67,7 +67,7 @@ class OCSecret(OpenShiftCLI): This receives a list of file names and converts it into a secret. The secret is then written to disk and passed into the `oc replace` command. ''' - secret = self.prep_secret(files, force) + secret = self.prep_secret(files, force=force) if secret['returncode'] != 0: return secret diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index 21da6b953..410b739e9 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -7,8 +7,13 @@ openshift.master.cluster_method == 'native' # TODO: need to fix up ignore_errors here +# We retry the controllers because the API may not be 100% initialized yet. 
- name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 when: > (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 5371588cf..9f3c14bad 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -12,7 +12,6 @@ openshift_aws_clusterid: default openshift_aws_region: us-east-1 openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" -openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}" openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" openshift_aws_iam_cert_path: '' @@ -48,7 +47,14 @@ openshift_aws_elb_health_check: unhealthy_threshold: 2 healthy_threshold: 2 -openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}" +openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}" +openshift_aws_elb_name_dict: + master: + external: "{{ openshift_aws_elb_basename }}-external" + internal: "{{ openshift_aws_elb_basename }}-internal" + infra: + external: "{{ openshift_aws_elb_basename }}" + openshift_aws_elb_idle_timout: 400 openshift_aws_elb_scheme: internet-facing openshift_aws_elb_cert_arn: '' @@ -75,6 +81,18 @@ openshift_aws_elb_listeners: load_balancer_port: 443 instance_protocol: tcp instance_port: 443 + infra: + external: + - protocol: tcp + load_balancer_port: 80 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True + - protocol: tcp + load_balancer_port: 443 + instance_protocol: tcp + instance_port: 443 + proxy_protocol: True openshift_aws_node_group_config_master_volumes: - device_name: /dev/sdb @@ -88,7 +106,7 @@ openshift_aws_node_group_config_node_volumes: device_type: gp2 delete_on_termination: True -openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}" +openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_node_group_termination_policy: Default openshift_aws_node_group_replace_instances: [] openshift_aws_node_group_replace_all_instances: False @@ -114,6 +132,7 @@ openshift_aws_node_group_config: wait_for_instances: True termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" + elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys()| map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}" compute: instance_type: m4.xlarge ami: "{{ openshift_aws_ami }}" @@ -148,21 +167,22 @@ openshift_aws_node_group_config: type: infra termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" + elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys()| map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}" + +openshift_aws_elb_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" +openshift_aws_elb_az_load_balancing: False openshift_aws_elb_security_groups: -- "{{ openshift_aws_clusterid }}" -- "{{ 
openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" +- "{{ openshift_aws_clusterid }}" # default sg +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg +- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s openshift_aws_elb_instance_filter: "tag:clusterid": "{{ openshift_aws_clusterid }}" "tag:host-type": "{{ openshift_aws_node_group_type }}" instance-state-name: running -openshift_aws_launch_config_security_groups: -- "{{ openshift_aws_clusterid }}" # default sg -- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg -- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s - +openshift_aws_security_groups_tags: "{{ openshift_aws_clusterid | build_instance_tags }}" openshift_aws_node_security_groups: default: name: "{{ openshift_aws_clusterid }}" diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py index 06e1f9602..a9893c0a7 100644 --- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py +++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py @@ -9,17 +9,17 @@ class FilterModule(object): ''' Custom ansible filters for use by openshift_aws role''' @staticmethod - def build_instance_tags(clusterid, status='owned'): + def build_instance_tags(clusterid): ''' This function will return a dictionary of the instance tags. The main desire to have this inside of a filter_plugin is that we need to build the following key. - {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'} + {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"} ''' tags = {'clusterid': clusterid, - 'kubernetes.io/cluster/{}'.format(clusterid): status} + 'kubernetes.io/cluster/{}'.format(clusterid): clusterid} return tags diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml index 0dac1c23d..0aac40ddd 100644 --- a/roles/openshift_aws/tasks/build_node_group.yml +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -21,10 +21,6 @@ - "'results' in amiout" - amiout.results|length > 0 -- when: openshift_aws_create_security_groups - name: "Create {{ openshift_aws_node_group_type }} security groups" - include: security_group.yml - - when: openshift_aws_create_launch_config name: "Create {{ openshift_aws_node_group_type }} launch config" include: launch_config.yml diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml index 7bc3184df..56abe9dd7 100644 --- a/roles/openshift_aws/tasks/elb.yml +++ b/roles/openshift_aws/tasks/elb.yml @@ -9,12 +9,6 @@ - name: debug debug: var=vpcout -- name: fetch the remote instances - ec2_remote_facts: - region: "{{ openshift_aws_region }}" - filters: "{{ openshift_aws_elb_instance_filter }}" - register: instancesout - - name: fetch the default subnet id ec2_vpc_subnet_facts: region: "{{ openshift_aws_region }}" @@ -23,7 +17,7 @@ vpc-id: "{{ vpcout.vpcs[0].id }}" register: subnetout -- name: +- name: dump the elb listeners debug: msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction] if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type @@ -33,6 +27,7 @@ ec2_elb_lb: name: "{{ l_openshift_aws_elb_name }}" state: present + cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}" security_group_names: "{{ openshift_aws_elb_security_groups }}" 
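The openshift_aws defaults above replace the single `openshift_aws_elb_name` with `openshift_aws_elb_name_dict` and then flatten that dict into the `elbs:` list with `keys() | map('extract', ...) | list`. A small standalone sketch of that Jinja idiom with a hypothetical dict (names here are examples only):

- name: Flatten a dict of ELB names into a list (hypothetical data)
  set_fact:
    example_elb_list: "{{ example_elbs.keys() | map('extract', example_elbs) | list }}"
  vars:
    example_elbs:
      external: mycluster-master-external
      internal: mycluster-master-internal

- debug:
    var: example_elb_list   # both ELB names as a flat list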
idle_timeout: "{{ openshift_aws_elb_idle_timout }}" region: "{{ openshift_aws_region }}" @@ -43,25 +38,9 @@ if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type else openshift_aws_elb_listeners }}" scheme: "{{ openshift_aws_elb_scheme }}" - tags: - KubernetesCluster: "{{ openshift_aws_clusterid }}" + tags: "{{ openshift_aws_elb_tags }}" register: new_elb -# It is necessary to ignore_errors here because the instances are not in 'ready' -# state when first added to ELB -- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}" - ec2_elb: - instance_id: "{{ item.id }}" - ec2_elbs: "{{ l_openshift_aws_elb_name }}" - state: present - region: "{{ openshift_aws_region }}" - wait: False - with_items: "{{ instancesout.instances }}" - ignore_errors: True - retries: 10 - register: elb_call - until: elb_call|succeeded - - debug: msg: "{{ item }}" with_items: diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index 8b7b02a0e..94aca5a35 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -19,7 +19,7 @@ - name: fetch the security groups for launch config ec2_group_facts: filters: - group-name: "{{ openshift_aws_launch_config_security_groups }}" + group-name: "{{ openshift_aws_elb_security_groups }}" vpc-id: "{{ vpcout.vpcs[0].id }}" region: "{{ openshift_aws_region }}" register: ec2sgs diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml index 737cfc7a6..1c99229ff 100644 --- a/roles/openshift_aws/tasks/master_facts.yml +++ b/roles/openshift_aws/tasks/master_facts.yml @@ -3,20 +3,18 @@ ec2_elb_facts: region: "{{ openshift_aws_region }}" names: - - "{{ item }}" - with_items: - - "{{ openshift_aws_elb_name }}-external" - - "{{ openshift_aws_elb_name }}-internal" + - "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}" delegate_to: localhost register: elbs - debug: var=elbs + run_once: true - name: set fact set_fact: - openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" + openshift_master_cluster_hostname: "{{ elbs.elbs[0].dns_name }}" osm_custom_cors_origins: - - "{{ elbs.results[1].elbs[0].dns_name }}" + - "{{ elbs.elbs[0].dns_name }}" - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com" - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com" with_items: "{{ groups['masters'] }}" diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml index a8518d43a..e99017b9f 100644 --- a/roles/openshift_aws/tasks/provision.yml +++ b/roles/openshift_aws/tasks/provision.yml @@ -7,6 +7,38 @@ name: create s3 bucket for registry include: s3.yml +- when: openshift_aws_create_security_groups + block: + - name: "Create {{ openshift_aws_node_group_type }} security groups" + include: security_group.yml + + - name: "Create {{ openshift_aws_node_group_type }} security groups" + include: security_group.yml + vars: + openshift_aws_node_group_type: infra + +- name: create our master internal load balancer + include: elb.yml + vars: + openshift_aws_elb_direction: internal + openshift_aws_elb_scheme: internal + l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}" + +- name: create our master external load balancer + include: elb.yml + vars: + openshift_aws_elb_direction: external + openshift_aws_elb_scheme: internet-facing + l_openshift_aws_elb_name: "{{ 
openshift_aws_elb_name_dict[openshift_aws_node_group_type]['external'] }}" + +- name: create our infra node external load balancer + include: elb.yml + vars: + l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict['infra']['external'] }}" + openshift_aws_elb_direction: external + openshift_aws_elb_scheme: internet-facing + openshift_aws_node_group_type: infra + - name: include scale group creation for master include: build_node_group.yml @@ -22,20 +54,6 @@ delay: 3 until: instancesout.instances|length > 0 -- name: create our master internal load balancers - include: elb.yml - vars: - openshift_aws_elb_direction: internal - l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal" - openshift_aws_elb_scheme: internal - -- name: create our master external load balancers - include: elb.yml - vars: - openshift_aws_elb_direction: external - l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external" - openshift_aws_elb_scheme: internet-facing - - name: wait for ssh to become available wait_for: port: 22 diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml index d319fdd1a..0cb749dcc 100644 --- a/roles/openshift_aws/tasks/seal_ami.yml +++ b/roles/openshift_aws/tasks/seal_ami.yml @@ -1,11 +1,4 @@ --- -- name: Remove any ansible facts created during AMI creation - file: - path: "/etc/ansible/facts.d/{{ item }}" - state: absent - with_items: - - openshift.fact - - name: fetch newly created instances ec2_remote_facts: region: "{{ openshift_aws_region }}" diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml index 161e72fb4..e1fb99b02 100644 --- a/roles/openshift_aws/tasks/security_group.yml +++ b/roles/openshift_aws/tasks/security_group.yml @@ -38,8 +38,7 @@ - name: tag sg groups with proper tags ec2_tag: - tags: - KubernetesCluster: "{{ openshift_aws_clusterid }}" + tags: "{{ openshift_aws_security_groups_tags }}" resource: "{{ item.group_id }}" region: "{{ openshift_aws_region }}" with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2 index ed9c0ed0b..76aebdcea 100644 --- a/roles/openshift_aws/templates/user_data.j2 +++ b/roles/openshift_aws/templates/user_data.j2 @@ -9,7 +9,7 @@ write_files: content: | openshift_group_type: {{ openshift_aws_node_group_type }} {% if openshift_aws_node_group_type != 'master' %} -- path: /etc/origin/node/csr_kubeconfig +- path: /etc/origin/node/bootstrap.kubeconfig owner: 'root:root' permissions: '0640' encoding: b64 diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index 419679bc2..587526d07 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -18,9 +18,7 @@ - name: Reload generated facts openshift_facts: - when: install_result | changed - delegate_to: "{{ openshift_ca_host }}" - run_once: true + when: hostvars[openshift_ca_host].install_result | changed - name: Create openshift_ca_config_dir if it does not exist file: @@ -108,6 +106,36 @@ delegate_to: "{{ openshift_ca_host }}" run_once: true +# Create client-ca-bundle.crt containing old and new OpenShift CA +# certificates. This bundle will be used when rolling the OpenShift CA +# certificate. 
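The filter-plugin change above reduces `build_instance_tags` to a single clusterid argument and has it emit a dict of the form {'clusterid': <id>, 'kubernetes.io/cluster/<id>': <id>}, which the ELB, security-group and node-group tasks now all consume. A sketch of applying those tags with `ec2_tag`, assuming the role's filter_plugins directory is on the filter path; the region, resource id and cluster name are placeholders:

- name: Tag an example resource with the cluster tags (placeholder values)
  ec2_tag:
    region: us-east-1
    resource: sg-0123456789abcdef0
    tags: "{{ 'mycluster' | build_instance_tags }}"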
+- name: Create client-ca-bundle.crt + block: + - command: mktemp -d /tmp/openshift-ansible-XXXXXX + register: openshift_ca_clientconfig_tmpdir + delegate_to: "{{ openshift_ca_host }}" + - copy: + src: "{{ item }}" + dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/" + remote_src: true + with_items: "{{ g_master_legacy_ca_result.files | default([]) | oo_collect('path') }}" + delegate_to: "{{ openshift_ca_host }}" + run_once: true + - copy: + src: "{{ openshift_ca_config_dir }}/ca.crt" + dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/" + remote_src: true + delegate_to: "{{ openshift_ca_host }}" + run_once: true + - assemble: + src: "{{ openshift_ca_clientconfig_tmpdir.stdout }}" + dest: "{{ openshift_ca_config_dir }}/client-ca-bundle.crt" + mode: 0644 + owner: root + group: root + delegate_to: "{{ openshift_ca_host }}" + run_once: true + - name: Test local loopback context command: > {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py index b40c49701..08045794a 100644 --- a/roles/openshift_cli/library/openshift_container_binary_sync.py +++ b/roles/openshift_cli/library/openshift_container_binary_sync.py @@ -36,7 +36,7 @@ class BinarySyncer(object): self.changed = False self.output = [] self.bin_dir = '/usr/local/bin' - self.image = image + self._image = image self.tag = tag self.backend = backend self.temp_dir = None # TBD @@ -142,6 +142,33 @@ class BinarySyncer(object): self.output.append("Moved %s to %s." % (src_path, dest_path)) self.changed = True + @property + def raw_image(self): + """ + Returns the image as it was originally passed in to the instance. + + .. note:: + This image string will only work directly with the atomic command. + + :returns: The original image passed in. + :rtype: str + """ + return self._image + + @property + def image(self): + """ + Returns the image without atomic prefixes used to map to skopeo args. 
+ + :returns: The image string without prefixes + :rtype: str + """ + image = self._image + for remove in ('oci:', 'http:', 'https:'): + if image.startswith(remove): + image = image.replace(remove, '') + return image + def main(): module = AnsibleModule( # noqa: F405 diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml index bdece7640..014c06641 100644 --- a/roles/openshift_default_storage_class/defaults/main.yml +++ b/roles/openshift_default_storage_class/defaults/main.yml @@ -13,6 +13,12 @@ openshift_storageclass_defaults: parameters: type: pd-standard + openstack: + name: standard + provisioner: cinder + parameters: + fstype: xfs + openshift_storageclass_default: "true" openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}" openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}" diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml index 172e2ac25..281ec8ed5 100644 --- a/roles/openshift_default_storage_class/tasks/main.yml +++ b/roles/openshift_default_storage_class/tasks/main.yml @@ -1,5 +1,5 @@ --- -# Install default storage classes in GCE & AWS +# Install default storage classes in GCE & AWS & OPENSTACK - name: Ensure storageclass object oc_storageclass: name: "{{ openshift_storageclass_name }}" diff --git a/roles/openshift_docker_gc/defaults/main.yml b/roles/openshift_docker_gc/defaults/main.yml new file mode 100644 index 000000000..9d79de8a1 --- /dev/null +++ b/roles/openshift_docker_gc/defaults/main.yml @@ -0,0 +1,3 @@ +--- +r_enable_docker_gc: "{{ openshift_crio_enable_docker_gc | default(False) }}" +r_docker_gc_node_selectors: "{{ openshift_crio_docker_gc_node_selector | default({}) }}" diff --git a/roles/openshift_docker_gc/meta/main.yml b/roles/openshift_docker_gc/meta/main.yml new file mode 100644 index 000000000..f88a7c533 --- /dev/null +++ b/roles/openshift_docker_gc/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: OpenShift + description: docker garbage collection + company: Red Hat, Inc + license: ASL 2.0 + min_ansible_version: 2.2 + platforms: + - name: EL + versions: + - 7 +dependencies: +- role: lib_openshift diff --git a/roles/openshift_docker_gc/tasks/main.yaml b/roles/openshift_docker_gc/tasks/main.yaml new file mode 100644 index 000000000..9ba551479 --- /dev/null +++ b/roles/openshift_docker_gc/tasks/main.yaml @@ -0,0 +1,27 @@ +--- +- name: Create docker-gc tempdir + command: mktemp -d + register: templates_tmpdir + +# NOTE: oc_adm_policy_user does not support -z (yet) +- name: Add dockergc as priviledged + shell: oc adm policy add-scc-to-user -z dockergc privileged +# oc_adm_policy_user: +# user: dockergc +# resource_kind: scc +# resource_name: privileged +# state: present + +- name: Create dockergc DaemonSet + become: yes + template: + src: dockergc-ds.yaml.j2 + dest: "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml" + +- name: Apply dockergc DaemonSet + oc_obj: + state: present + kind: DaemonSet + name: "dockergc" + files: + - "{{ templates_tmpdir.stdout }}/dockergc-ds.yaml" diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 new file mode 100644 index 000000000..53e8b448b --- /dev/null +++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 @@ -0,0 +1,58 @@ +apiVersion: v1 +kind: List +items: +- apiVersion: v1 + kind: 
ServiceAccount + metadata: + name: dockergc + # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged + # in order for the dockergc to access the docker socket and root directory +- apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + name: dockergc + labels: + app: dockergc + spec: + template: + metadata: + labels: + app: dockergc + name: dockergc + spec: +{# Only set nodeSelector if the dict is not empty #} +{% if r_docker_gc_node_selectors %} + nodeSelector: +{% for k,v in r_docker_gc_node_selectors.items() %} + {{ k }}: {{ v }}{% endfor %}{% endif %} + + serviceAccountName: dockergc + containers: + - image: openshift/origin:latest + args: + - "ex" + - "dockergc" + - "--image-gc-low-threshold=60" + - "--image-gc-high-threshold=80" + - "--minimum-ttl-duration=1h0m0s" + securityContext: + privileged: true + name: dockergc + resources: + requests: + memory: 30Mi + cpu: 50m + volumeMounts: + - name: docker-root + readOnly: true + mountPath: /var/lib/docker + - name: docker-socket + readOnly: false + mountPath: /var/run/docker.sock + volumes: + - name: docker-root + hostPath: + path: /var/lib/docker + - name: docker-socket + hostPath: + path: /var/run/docker.sock diff --git a/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json index 0bb56452b..af66b9ea4 100644 --- a/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json @@ -31,6 +31,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.1" } }, { @@ -44,6 +48,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.2" } }, { @@ -56,6 +64,10 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "tomcat-websocket-chat", "version": "1.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.3" } } ] @@ -84,6 +96,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.1" } }, { @@ -97,6 +113,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.2" } }, { @@ -109,6 +129,10 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "tomcat-websocket-chat", "version": "1.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.3" } } ] @@ -137,6 +161,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7" + }, + "from": { + 
"kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat7-openshift:1.0" } } ] @@ -165,6 +193,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat8-openshift:1.0" } } ] @@ -194,6 +226,10 @@ "sampleRef": "6.4.x", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.1" } }, { @@ -208,6 +244,10 @@ "sampleRef": "6.4.x", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.2" } }, { @@ -222,6 +262,10 @@ "sampleRef": "6.4.x", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.3" } }, { @@ -236,6 +280,10 @@ "sampleRef": "6.4.x", "version": "1.4", "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.4" } }, { @@ -248,7 +296,12 @@ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", "sampleContextDir": "kitchensink", "sampleRef": "6.4.x", - "version": "1.5" + "version": "1.5", + "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.5" } } ] @@ -278,6 +331,10 @@ "sampleRef": "7.0.0.GA", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss EAP 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.3" } }, { @@ -292,6 +349,10 @@ "sampleRef": "7.0.0.GA", "version": "1.4", "openshift.io/display-name": "Red Hat JBoss EAP 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.4" } }, { @@ -304,7 +365,44 @@ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", "sampleContextDir": "kitchensink", "sampleRef": "7.0.0.GA", - "version": "1.5" + "version": "1.5", + "openshift.io/display-name": "Red Hat JBoss EAP 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.5" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-eap71-openshift", + "annotations": { + "openshift.io/display-name": "Red Hat JBoss EAP 7.1" + } + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap71-openshift", + "tags": [ + { + "name": "1.0-TP", + "annotations": { + "description": "JBoss EAP 7.1 Tech Preview.", + "iconClass": "icon-jboss", + "tags": "builder,eap,javaee,java,jboss,xpaas", + "supports":"eap:7.1,javaee:7,java:8,xpaas:1.0", + "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", + "sampleContextDir": "kitchensink", + "sampleRef": "7.0.0.GA", + "version": "1.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-7-tech-preview/eap71-openshift:1.0" } } ] @@ -334,6 +432,10 @@ "sampleRef": "1.2", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server" + }, + "from": { + "kind": 
"DockerImage", + "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift:1.2" } } ] @@ -363,6 +465,10 @@ "sampleRef": "1.3", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift:1.3" } }, { @@ -375,7 +481,12 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "decisionserver/hellorules", "sampleRef": "1.3", - "version": "1.4" + "version": "1.4", + "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift:1.4" } } ] @@ -400,7 +511,12 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "decisionserver/hellorules", "sampleRef": "1.3", - "version": "1.0" + "version": "1.0", + "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver64-openshift:1.0" } } ] @@ -430,6 +546,10 @@ "sampleRef": "1.3", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift:1.3" } }, { @@ -442,7 +562,12 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "processserver/library", "sampleRef": "1.3", - "version": "1.4" + "version": "1.4", + "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift:1.4" } } ] @@ -467,7 +592,12 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "processserver/library", "sampleRef": "1.3", - "version": "1.0" + "version": "1.0", + "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-processserver-6/processserver64-openshift:1.0" } } ] @@ -494,6 +624,10 @@ "supports": "datagrid:6.5,xpaas:1.2", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.2" } }, { @@ -505,6 +639,10 @@ "supports": "datagrid:6.5,xpaas:1.4", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.3" } }, { @@ -514,7 +652,42 @@ "iconClass": "icon-jboss", "tags": "datagrid,jboss,xpaas", "supports":"datagrid:6.5,xpaas:1.4", - "version": "1.4" + "version": "1.4", + "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.4" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-datagrid71-openshift", + "annotations": { + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1" + } + }, + "spec": { + "dockerImageRepository": 
"registry.access.redhat.com/jboss-datagrid-7/datagrid71-openshift", + "tags": [ + { + "name": "1.0", + "annotations": { + "description": "JBoss Data Grid 7.1 S2I images.", + "iconClass": "icon-jboss", + "tags": "datagrid,jboss,xpaas", + "supports": "datagrid:7.1,xpaas:1.0", + "version": "1.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-openshift:1.0" } } ] @@ -540,6 +713,39 @@ "tags": "client,jboss,xpaas", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-client-openshift:1.0" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "jboss-datagrid71-client-openshift", + "annotations": { + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP" + } + }, + "spec": { + "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-client-openshift", + "tags": [ + { + "name": "1.0", + "annotations": { + "description": "JBoss Data Grid 7.1 Client Modules for EAP.", + "iconClass": "icon-jboss", + "tags": "client,jboss,xpaas", + "version": "1.0", + "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-client-openshift:1.0" } } ] @@ -566,6 +772,10 @@ "supports": "datavirt:6.3,xpaas:1.4", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.0" } }, { @@ -577,6 +787,10 @@ "supports": "datavirt:6.3,xpaas:1.4", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.1" } }, { @@ -586,7 +800,12 @@ "iconClass": "icon-jboss", "tags": "datavirt,jboss,xpaas", "supports":"datavirt:6.3,xpaas:1.4", - "version": "1.2" + "version": "1.2", + "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.2" } } ] @@ -612,6 +831,10 @@ "tags": "client,jboss,xpaas", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-driver-openshift:1.0" } } ] @@ -638,6 +861,10 @@ "supports": "amq:6.2,messaging,xpaas:1.1", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.1" } }, { @@ -649,6 +876,10 @@ "supports": "amq:6.2,messaging,xpaas:1.2", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.2" } }, { @@ -660,6 +891,10 @@ "supports": "amq:6.2,messaging,xpaas:1.3", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.3" } }, { @@ -669,7 +904,27 @@ 
"iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", "supports":"amq:6.2,messaging,xpaas:1.4", - "version": "1.4" + "version": "1.4", + "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.4" + } + }, + { + "name": "1.5", + "annotations": { + "description": "JBoss A-MQ 6.2 broker image.", + "iconClass": "icon-jboss", + "tags": "messaging,amq,jboss,xpaas", + "supports":"amq:6.2,messaging,xpaas:1.5", + "version": "1.5", + "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.5" } } ] @@ -696,6 +951,25 @@ "supports": "amq:6.3,messaging,xpaas:1.0", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq63-openshift:1.0" + } + }, + { + "name": "1.1", + "annotations": { + "description": "JBoss A-MQ 6.3 broker image.", + "iconClass": "icon-jboss", + "tags": "messaging,amq,jboss,xpaas", + "supports": "amq:6.3,messaging,xpaas:1.1", + "version": "1.1", + "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq63-openshift:1.1" } } ] @@ -723,6 +997,10 @@ "supports": "sso:7.0,xpaas:1.3", "version": "1.3", "openshift.io/display-name": "Red Hat Single Sign-On 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-sso-7/sso70-openshift:1.3" } }, { @@ -734,6 +1012,10 @@ "supports": "sso:7.0,xpaas:1.4", "version": "1.4", "openshift.io/display-name": "Red Hat Single Sign-On 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-sso-7/sso70-openshift:1.4" } } ] @@ -761,6 +1043,10 @@ "supports": "sso:7.1,xpaas:1.4", "version": "1.0", "openshift.io/display-name": "Red Hat Single Sign-On 7.1" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-sso-7/sso71-openshift:1.0" } }, { @@ -772,6 +1058,10 @@ "supports": "sso:7.1,xpaas:1.4", "version": "1.1", "openshift.io/display-name": "Red Hat Single Sign-On 7.1" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-sso-7/sso71-openshift:1.1" } } ] @@ -800,6 +1090,10 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts", "sampleContextDir": "undertow-servlet", "version": "1.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift:1.0" } }, { @@ -813,6 +1107,10 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts", "sampleContextDir": "undertow-servlet", "version": "1.1" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift:1.1" } } ] diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json index 0aad7fae6..af66b9ea4 100644 --- a/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json +++ b/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json @@ -31,6 +31,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7" + }, + "from": { + "kind": "DockerImage", + "name": 
"registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.1" } }, { @@ -44,6 +48,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 7" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.2" } }, { @@ -56,6 +64,10 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "tomcat-websocket-chat", "version": "1.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift:1.3" } } ] @@ -84,6 +96,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.1" } }, { @@ -97,6 +113,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss Web Server 3.0 Tomcat 8" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.2" } }, { @@ -109,6 +129,10 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "tomcat-websocket-chat", "version": "1.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift:1.3" } } ] @@ -137,6 +161,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 7" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat7-openshift:1.0" } } ] @@ -165,6 +193,10 @@ "sampleContextDir": "tomcat-websocket-chat", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Web Server 3.1 Tomcat 8" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-webserver-3/webserver31-tomcat8-openshift:1.0" } } ] @@ -194,6 +226,10 @@ "sampleRef": "6.4.x", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.1" } }, { @@ -208,6 +244,10 @@ "sampleRef": "6.4.x", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.2" } }, { @@ -222,6 +262,10 @@ "sampleRef": "6.4.x", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.3" } }, { @@ -236,6 +280,10 @@ "sampleRef": "6.4.x", "version": "1.4", "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.4" } }, { @@ -248,7 +296,12 @@ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", "sampleContextDir": "kitchensink", "sampleRef": "6.4.x", - "version": "1.5" + "version": "1.5", + "openshift.io/display-name": "Red Hat JBoss EAP 6.4" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-6/eap64-openshift:1.5" } } ] @@ -278,6 +331,10 @@ "sampleRef": "7.0.0.GA", "version": "1.3", "openshift.io/display-name": "Red Hat 
JBoss EAP 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.3" } }, { @@ -292,6 +349,10 @@ "sampleRef": "7.0.0.GA", "version": "1.4", "openshift.io/display-name": "Red Hat JBoss EAP 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.4" } }, { @@ -304,7 +365,12 @@ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", "sampleContextDir": "kitchensink", "sampleRef": "7.0.0.GA", - "version": "1.5" + "version": "1.5", + "openshift.io/display-name": "Red Hat JBoss EAP 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.5" } } ] @@ -366,6 +432,10 @@ "sampleRef": "1.2", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift:1.2" } } ] @@ -395,6 +465,10 @@ "sampleRef": "1.3", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift:1.3" } }, { @@ -407,7 +481,12 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "decisionserver/hellorules", "sampleRef": "1.3", - "version": "1.4" + "version": "1.4", + "openshift.io/display-name": "Red Hat JBoss BRMS 6.3 decision server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift:1.4" } } ] @@ -432,7 +511,12 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "decisionserver/hellorules", "sampleRef": "1.3", - "version": "1.0" + "version": "1.0", + "openshift.io/display-name": "Red Hat JBoss BRMS 6.4 decision server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver64-openshift:1.0" } } ] @@ -462,6 +546,10 @@ "sampleRef": "1.3", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift:1.3" } }, { @@ -474,7 +562,12 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "processserver/library", "sampleRef": "1.3", - "version": "1.4" + "version": "1.4", + "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.3 intelligent process server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift:1.4" } } ] @@ -499,7 +592,12 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", "sampleContextDir": "processserver/library", "sampleRef": "1.3", - "version": "1.0" + "version": "1.0", + "openshift.io/display-name": "Red Hat JBoss BPM Suite 6.4 intelligent process server" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-processserver-6/processserver64-openshift:1.0" } } ] @@ -526,6 +624,10 @@ "supports": "datagrid:6.5,xpaas:1.2", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.2" } }, { @@ -537,6 +639,10 
@@ "supports": "datagrid:6.5,xpaas:1.4", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.3" } }, { @@ -546,7 +652,12 @@ "iconClass": "icon-jboss", "tags": "datagrid,jboss,xpaas", "supports":"datagrid:6.5,xpaas:1.4", - "version": "1.4" + "version": "1.4", + "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.4" } } ] @@ -573,6 +684,10 @@ "supports": "datagrid:7.1,xpaas:1.0", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-openshift:1.0" } } ] @@ -598,6 +713,10 @@ "tags": "client,jboss,xpaas", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-client-openshift:1.0" } } ] @@ -623,6 +742,10 @@ "tags": "client,jboss,xpaas", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-client-openshift:1.0" } } ] @@ -649,6 +772,10 @@ "supports": "datavirt:6.3,xpaas:1.4", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.0" } }, { @@ -660,6 +787,10 @@ "supports": "datavirt:6.3,xpaas:1.4", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.1" } }, { @@ -669,7 +800,12 @@ "iconClass": "icon-jboss", "tags": "datavirt,jboss,xpaas", "supports":"datavirt:6.3,xpaas:1.4", - "version": "1.2" + "version": "1.2", + "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift:1.2" } } ] @@ -695,6 +831,10 @@ "tags": "client,jboss,xpaas", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.5 JDBC Driver Modules for EAP" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-driver-openshift:1.0" } } ] @@ -721,6 +861,10 @@ "supports": "amq:6.2,messaging,xpaas:1.1", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.1" } }, { @@ -732,6 +876,10 @@ "supports": "amq:6.2,messaging,xpaas:1.2", "version": "1.2", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.2" } }, { @@ -743,6 +891,10 @@ "supports": "amq:6.2,messaging,xpaas:1.3", "version": "1.3", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.3" } }, { @@ -752,7 +904,12 @@ "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", "supports":"amq:6.2,messaging,xpaas:1.4", - "version": 
"1.4" + "version": "1.4", + "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.4" } }, { @@ -762,7 +919,12 @@ "iconClass": "icon-jboss", "tags": "messaging,amq,jboss,xpaas", "supports":"amq:6.2,messaging,xpaas:1.5", - "version": "1.5" + "version": "1.5", + "openshift.io/display-name": "Red Hat JBoss A-MQ 6.2" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq62-openshift:1.5" } } ] @@ -789,6 +951,10 @@ "supports": "amq:6.3,messaging,xpaas:1.0", "version": "1.0", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq63-openshift:1.0" } }, { @@ -800,6 +966,10 @@ "supports": "amq:6.3,messaging,xpaas:1.1", "version": "1.1", "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/jboss-amq-6/amq63-openshift:1.1" } } ] @@ -827,6 +997,10 @@ "supports": "sso:7.0,xpaas:1.3", "version": "1.3", "openshift.io/display-name": "Red Hat Single Sign-On 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-sso-7/sso70-openshift:1.3" } }, { @@ -838,6 +1012,10 @@ "supports": "sso:7.0,xpaas:1.4", "version": "1.4", "openshift.io/display-name": "Red Hat Single Sign-On 7.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-sso-7/sso70-openshift:1.4" } } ] @@ -865,6 +1043,10 @@ "supports": "sso:7.1,xpaas:1.4", "version": "1.0", "openshift.io/display-name": "Red Hat Single Sign-On 7.1" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-sso-7/sso71-openshift:1.0" } }, { @@ -876,6 +1058,10 @@ "supports": "sso:7.1,xpaas:1.4", "version": "1.1", "openshift.io/display-name": "Red Hat Single Sign-On 7.1" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-sso-7/sso71-openshift:1.1" } } ] @@ -904,6 +1090,10 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts", "sampleContextDir": "undertow-servlet", "version": "1.0" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift:1.0" } }, { @@ -917,6 +1107,10 @@ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts", "sampleContextDir": "undertow-servlet", "version": "1.1" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift:1.1" } } ] diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 33028fea4..a88945538 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1289,7 +1289,7 @@ def get_container_openshift_version(facts): If containerized, see if we can determine the installed version via the systemd environment files. 
""" - for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']: + for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']: env_path = filename % facts['common']['service_type'] if not os.path.exists(env_path): continue diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh index 5ed6d9f84..4d150bc74 100644 --- a/roles/openshift_gcp/templates/provision.j2.sh +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -313,11 +313,11 @@ fi # wait until all node groups are stable {% for node_group in openshift_gcp_node_group_config %} -{% if node_group.bootstrap | default(False) %} -# not waiting for {{ node_group.name }} due to bootstrapping -{% else %} +{% if node_group.wait_for_stable | default(False) or not (node_group.bootstrap | default(False)) %} # wait for stable {{ node_group.name }} ( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) & +{% else %} +# not waiting for {{ node_group.name }} due to bootstrapping {% endif %} {% endfor %} diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh index a1e0affec..c9213b800 100644 --- a/roles/openshift_gcp/templates/remove.j2.sh +++ b/roles/openshift_gcp/templates/remove.j2.sh @@ -37,7 +37,7 @@ function teardown() { # scale down {{ node_group.name }} ( # performs a delete and scale down as one operation to ensure maximum parallelism - if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' ); then + if ! 
instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' 2>/dev/null ); then exit 0 fi instances="${instances%?}" @@ -59,6 +59,21 @@ if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bu fi ) & +# Project metadata prefixed with {{ openshift_gcp_prefix }} +( + for key in $( gcloud --project "{{ openshift_gcp_project }}" compute project-info describe --flatten=commonInstanceMetadata.items[] '--format=value(commonInstanceMetadata.items.key)' ); do + if [[ "${key}" == "{{ openshift_gcp_prefix }}"* ]]; then + gcloud --project "{{ openshift_gcp_project }}" compute project-info remove-metadata "--keys=${key}" + fi + done +) & + +# Instances and disks used for image building +( + teardown "{{ openshift_gcp_prefix }}build-image-instance" compute instances --zone "{{ openshift_gcp_zone }}" + teardown "{{ openshift_gcp_prefix }}build-image-instance" compute disks --zone "{{ openshift_gcp_zone }}" +) & + # DNS ( dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}" @@ -152,5 +167,12 @@ for i in `jobs -p`; do wait $i; done for i in `jobs -p`; do wait $i; done +# Images specifically located under this cluster prefix family +for name in $( gcloud --project "{{ openshift_gcp_project }}" compute images list "--filter=family={{ openshift_gcp_prefix }}images" '--format=value(name)' ); do + ( gcloud --project "{{ openshift_gcp_project }}" compute images delete "${name}" ) & +done + # Network -teardown "{{ openshift_gcp_network_name }}" compute networks +( teardown "{{ openshift_gcp_network_name }}" compute networks ) & + +for i in `jobs -p`; do wait $i; done
\ No newline at end of file diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index 7956559c6..87e6146d4 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -1,6 +1,7 @@ """Check that there is enough disk space in predefined paths.""" import tempfile +import os.path from openshift_checks import OpenShiftCheck, OpenShiftCheckException @@ -121,11 +122,21 @@ class DiskAvailability(OpenShiftCheck): return {} + def find_ansible_submounts(self, path): + """Return a list of ansible_mounts that are below the given path.""" + base = os.path.join(path, "") + return [ + mount + for mount in self.get_var("ansible_mounts") + if mount["mount"].startswith(base) + ] + def free_bytes(self, path): """Return the size available in path based on ansible_mounts.""" + submounts = sum(mnt.get('size_available', 0) for mnt in self.find_ansible_submounts(path)) mount = self.find_ansible_mount(path) try: - return mount['size_available'] + return mount['size_available'] + submounts except KeyError: raise OpenShiftCheckException( 'Unable to retrieve disk availability for "{path}".\n' diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py index 0558ddf14..6808d8b2f 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_storage.py +++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py @@ -14,7 +14,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck): """ name = "docker_storage" - tags = ["pre-install", "health", "preflight"] + tags = ["health", "preflight"] dependencies = ["python-docker-py"] storage_drivers = ["devicemapper", "overlay", "overlay2"] diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py index 29a325a17..7acdb40ec 100644 --- a/roles/openshift_health_checker/test/disk_availability_test.py +++ b/roles/openshift_health_checker/test/disk_availability_test.py @@ -96,6 +96,24 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks): 'size_available': 20 * 10**9 + 1, }], ), + ( + ['oo_masters_to_config'], + 0, + [{ + 'mount': '/', + 'size_available': 2 * 10**9, + }, { # not enough directly on /var + 'mount': '/var', + 'size_available': 10 * 10**9 + 1, + }, { + # but subdir mounts add up to enough + 'mount': '/var/lib/docker', + 'size_available': 20 * 10**9 + 1, + }, { + 'mount': '/var/lib/origin', + 'size_available': 20 * 10**9 + 1, + }], + ), ]) def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts): task_vars = dict( @@ -104,9 +122,10 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib ansible_mounts=ansible_mounts, ) - result = DiskAvailability(fake_execute_module, task_vars).run() + check = DiskAvailability(fake_execute_module, task_vars) + check.run() - assert not result.get('failed', False) + assert not check.failures @pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [ diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index 2aceef9e4..dd7053656 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -29,7 +29,9 @@ src: "{{ item }}" with_items: "{{ openshift_hosted_routers | 
oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" - when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} + when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} or + ( openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 ) + # This is for when we desire a cluster signed cert # The certificate is generated and placed in master_config_dir/ @@ -42,8 +44,8 @@ hostnames: - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}" - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}" - cert: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}" - key: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.key') }}" + cert: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" + key: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" with_items: "{{ openshift_hosted_routers }}" - name: set the openshift_hosted_router_certificate @@ -55,6 +57,7 @@ when: - openshift_hosted_router_create_certificate | bool - openshift_hosted_router_certificate == {} + - openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0 - name: Create the router service account(s) oc_serviceaccount: diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml index ce7688581..88b893448 100644 --- a/roles/openshift_hosted_metrics/handlers/main.yml +++ b/roles/openshift_hosted_metrics/handlers/main.yml @@ -4,8 +4,13 @@ when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' notify: Verify API Server +# We retry the controllers because the API may not be 100% initialized yet. 
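The `disk_availability` change earlier in this diff means free space for a path is now the sum of the mount holding the path plus any mounts nested below it (so a dedicated `/var/lib/docker` volume counts toward the `/var` recommendation, as the new test case exercises). A rough standalone approximation of the updated `free_bytes` logic, assuming an `ansible_mounts`-style list (the real check resolves the owning mount via its own `find_ansible_mount` helper):

```python
import os.path

def free_bytes(path, ansible_mounts):
    """Approximate the updated check: space on the owning mount plus all sub-mounts."""
    base = os.path.join(path, "")  # trailing slash so "/var" does not match "/var2"
    submounts = sum(m.get("size_available", 0)
                    for m in ansible_mounts if m["mount"].startswith(base))
    # Stand-in for find_ansible_mount: deepest mount point containing the path.
    owner = max((m for m in ansible_mounts if path.startswith(m["mount"])),
                key=lambda m: len(m["mount"]))
    return owner["size_available"] + submounts


mounts = [
    {"mount": "/", "size_available": 2 * 10**9},
    {"mount": "/var", "size_available": 10 * 10**9},
    {"mount": "/var/lib/docker", "size_available": 20 * 10**9},
]
print(free_bytes("/var", mounts))  # 30000000000 -- /var plus its docker sub-mount
```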
- name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - name: Verify API Server diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 0ea34faf2..6c5bb8693 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -297,6 +297,8 @@ oc delete pod --selector=<ds_selector> Changelog --------- +Tue Oct 26, 2017 +- Make CPU request equal limit if limit is greater then request Tue Oct 10, 2017 - Default imagePullPolicy changed from Always to IfNotPresent diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py index 959573635..e1a5ea726 100644 --- a/roles/openshift_logging/filter_plugins/openshift_logging.py +++ b/roles/openshift_logging/filter_plugins/openshift_logging.py @@ -3,6 +3,7 @@ ''' import random +import re def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'): @@ -17,6 +18,31 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'): return dict(kind='emptydir') +def min_cpu(left, right): + '''Return the minimum cpu value of the two values given''' + message = "Unable to evaluate {} cpu value is specified correctly '{}'. Exp whole, decimal or int followed by M" + pattern = re.compile(r"^(\d*\.?\d*)([Mm])?$") + millis_per_core = 1000 + if not right: + return left + m_left = pattern.match(left) + if not m_left: + raise RuntimeError(message.format("left", left)) + m_right = pattern.match(right) + if not m_right: + raise RuntimeError(message.format("right", right)) + left_value = float(m_left.group(1)) + right_value = float(m_right.group(1)) + if m_left.group(2) not in ["M", "m"]: + left_value = left_value * millis_per_core + if m_right.group(2) not in ["M", "m"]: + right_value = right_value * millis_per_core + response = left + if left_value != min(left_value, right_value): + response = right + return response + + def walk(source, path, default, delimiter='.'): '''Walk the sourch hash given the path and return the value or default if not found''' if not isinstance(source, dict): @@ -87,6 +113,7 @@ class FilterModule(object): 'random_word': random_word, 'entry_from_named_pair': entry_from_named_pair, 'map_from_pairs': map_from_pairs, + 'min_cpu': min_cpu, 'es_storage': es_storage, 'serviceaccount_name': serviceaccount_name, 'serviceaccount_namespace': serviceaccount_namespace, diff --git a/roles/openshift_logging/filter_plugins/test b/roles/openshift_logging/filter_plugins/test index 3ad956cca..bac25c012 100644 --- a/roles/openshift_logging/filter_plugins/test +++ b/roles/openshift_logging/filter_plugins/test @@ -1,7 +1,22 @@ import unittest from openshift_logging import walk +from openshift_logging import min_cpu class TestFilterMethods(unittest.TestCase): + + + def test_min_cpu_for_none(self): + source = "1000M" + self.assertEquals(min_cpu(source, None), "1000M") + + def test_min_cpu_for_millis(self): + source = "1" + self.assertEquals(min_cpu(source, "0.1"), "0.1") + + + def test_min_cpu_for_whole(self): + source = "120M" + self.assertEquals(min_cpu(source, "2"), "120M") def test_walk_find_key(self): diff --git a/roles/openshift_logging/handlers/main.yml 
b/roles/openshift_logging/handlers/main.yml index ce7688581..acc838bd1 100644 --- a/roles/openshift_logging/handlers/main.yml +++ b/roles/openshift_logging/handlers/main.yml @@ -4,8 +4,13 @@ when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' notify: Verify API Server +# We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - name: Verify API Server diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 668a3f7e7..cec295d65 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -36,6 +36,14 @@ - openshift_logging_label_key != "" - openshift_logging_label_value is defined +- name: Annotate Logging Project to allow overcommit + oc_edit: + kind: ns + name: "{{ openshift_logging_namespace }}" + separator: '#' + content: + metadata#annotations#quota.openshift.io/cluster-resource-override-enabled: "false" + - name: Create logging cert directory file: path: "{{ openshift.common.config_base }}/logging" @@ -70,7 +78,7 @@ generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}" - openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" + openshift_logging_elasticsearch_pvc_name: "{{ outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" @@ -128,7 +136,7 @@ generated_certs_dir: "{{openshift.common.config_base}}/logging" openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}" openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}" - openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" + openshift_logging_elasticsearch_pvc_name: "{{ outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}" openshift_logging_elasticsearch_ops_deployment: true openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index b4ddf45d9..fcaf18ed4 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -90,7 +90,7 @@ es_host: "{{
openshift_logging_curator_es_host }}" es_port: "{{ openshift_logging_curator_es_port }}" curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}" - curator_cpu_request: "{{ openshift_logging_curator_cpu_request }}" + curator_cpu_request: "{{ openshift_logging_curator_cpu_request | min_cpu(openshift_logging_curator_cpu_limit | default(none)) }}" curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}" curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}" curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}" diff --git a/roles/openshift_logging_elasticsearch/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh deleted file mode 100644 index 339b5a1b2..000000000 --- a/roles/openshift_logging_elasticsearch/files/es_migration.sh +++ /dev/null @@ -1,79 +0,0 @@ -CA=${1:-/etc/openshift/logging/ca.crt} -KEY=${2:-/etc/openshift/logging/system.admin.key} -CERT=${3:-/etc/openshift/logging/system.admin.crt} -openshift_logging_es_host=${4:-logging-es} -openshift_logging_es_port=${5:-9200} -namespace=${6:-logging} - -# for each index in _cat/indices -# skip indices that begin with . - .kibana, .operations, etc. -# skip indices that contain a uuid -# get a list of unique project -# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices -# we are interested in - the awk will strip that part off -function get_list_of_indices() { - curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ - awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ - '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ - sort -u -} - -# for each index in _cat/indices -# skip indices that begin with . - .kibana, .operations, etc. -# get a list of unique project.uuid -# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices -# we are interested in - the awk will strip that part off -function get_list_of_proj_uuid_indices() { - curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ - awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ - '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ - sort -u -} - -if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then - echo "No Elasticsearch pods found running. Cannot update common data model." - exit 1 -fi - -count=$(get_list_of_indices | wc -l) -if [ $count -eq 0 ]; then - echo No matching indices found - skipping update_for_uuid -else - echo Creating aliases for $count index patterns . . . - { - echo '{"actions":[' - get_list_of_indices | \ - while IFS=. read proj ; do - # e.g. 
make test.uuid.* an alias of test.* so we can search for - # /test.uuid.*/_search and get both the test.uuid.* and - # the test.* indices - uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null) - [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}" - done - echo ']}' - } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases" -fi - -count=$(get_list_of_proj_uuid_indices | wc -l) -if [ $count -eq 0 ] ; then - echo No matching indexes found - skipping update_for_common_data_model - exit 0 -fi - -echo Creating aliases for $count index patterns . . . -# for each index in _cat/indices -# skip indices that begin with . - .kibana, .operations, etc. -# get a list of unique project.uuid -# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices -# we are interested in - the awk will strip that part off -{ - echo '{"actions":[' - get_list_of_proj_uuid_indices | \ - while IFS=. read proj uuid ; do - # e.g. make project.test.uuid.* and alias of test.uuid.* so we can search for - # /project.test.uuid.*/_search and get both the test.uuid.* and - # the project.test.uuid.* indices - echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}" - done - echo ']}' -} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases" diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 7aabdc861..e7ef443bd 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -354,7 +354,7 @@ image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}" proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}" es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}" - es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request }}" + es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request | min_cpu(openshift_logging_elasticsearch_cpu_limit | default(none)) }}" es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}" es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}" diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index f56810610..2f89c3f9f 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -154,7 +154,6 @@ path: "{{ generated_certs_dir }}/system.logging.fluentd.crt" # create Fluentd daemonset - # this should change based on the type of fluentd deployment to be done... 
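Several of the logging components in this patch now pass their CPU request through the new `min_cpu` filter so the request can never exceed the limit. A condensed standalone restatement of that comparison (not the role's exact code), normalising `M`/`m` millicore notation against whole or decimal cores:

```python
import re

CPU_PATTERN = re.compile(r"^(\d*\.?\d*)([Mm])?$")

def min_cpu(left, right):
    """Return whichever of the two CPU strings represents the smaller amount;
    values without an M/m suffix are treated as whole cores (1 core = 1000 millicores)."""
    if not right:
        return left
    def to_millicores(value):
        match = CPU_PATTERN.match(value)
        if not match:
            raise ValueError("Unparseable CPU value: {}".format(value))
        number = float(match.group(1))
        return number if match.group(2) else number * 1000
    return left if to_millicores(left) <= to_millicores(right) else right


print(min_cpu("500m", "2"))   # '500m' -- request already below a 2-core limit
print(min_cpu("1", "0.1"))    # '0.1'  -- request clamped down to the smaller limit
print(min_cpu("100M", None))  # '100M' -- no limit supplied, request unchanged
```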
# TODO: pass in aggregation configurations - name: Generate logging-fluentd daemonset definition @@ -173,7 +172,7 @@ fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}" fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}" fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}" - fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request }}" + fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}" fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}" audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}" audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}" diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 809f7a631..8ef8ede9a 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -230,10 +230,10 @@ es_host: "{{ openshift_logging_kibana_es_host }}" es_port: "{{ openshift_logging_kibana_es_port }}" kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}" - kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request }}" + kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request | min_cpu(openshift_logging_kibana_cpu_limit | default(none)) }}" kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}" kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}" - kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request }}" + kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request | min_cpu(openshift_logging_kibana_proxy_cpu_limit | default(none)) }}" kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}" kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}" kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}" diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 1b46a7ac3..5b257139e 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ b/roles/openshift_logging_mux/tasks/main.yaml @@ -171,7 +171,7 @@ ops_host: "{{ openshift_logging_mux_ops_host }}" ops_port: "{{ openshift_logging_mux_ops_port }}" mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}" - mux_cpu_request: "{{ openshift_logging_mux_cpu_request }}" + mux_cpu_request: "{{ openshift_logging_mux_cpu_request | min_cpu(openshift_logging_mux_cpu_limit | default(none)) }}" mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}" mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}" mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}" diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md index 3a71d9211..96de82669 100644 --- a/roles/openshift_management/README.md +++ b/roles/openshift_management/README.md @@ -38,6 +38,10 @@ deployment type (`openshift_deployment_type`): * [Cloud Provider](#cloud-provider) * [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only) * [Customization](#customization) + * [Container Provider](#container-provider) + * [Manually](#manually) + * [Automatically](#automatically) + * [Multiple Providers](#multiple-providers) * [Uninstall](#uninstall) * [Additional Information](#additional-information) @@ -80,30 +84,20 @@ to there being no databases that require pods. 
*Be extra careful* if you are overriding template parameters. Including parameters not defined in a template **will -cause errors**. - -**Container Provider Integration** - If you want add your container -platform (OCP/Origin) as a *Container Provider* in CFME/MIQ then you -must ensure that the infrastructure management hooks are installed. - -* During your OCP/Origin install, ensure that you have the - `openshift_use_manageiq` parameter set to `true` in your inventory - at install time. This will create a `management-infra` project and a - service account user. -* After CFME/MIQ is installed, obtain the `management-admin` service - account token and copy it somewhere safe. - -```bash -$ oc serviceaccounts get-token -n management-infra management-admin -eyJhuGdiOiJSUzI1NiIsInR5dCI6IkpXVCJ9.eyJpd9MiOiJrbWJldm5lbGVzL9NldnZpY2VhY2NvbW50Iiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9uYW1ld9BhY2UiOiJtYW5hZ2VtZW50LWluZnJhIiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9zZWNyZXQuumFtZSI6Im1humFnZW1lunQtYWRtyW4tbG9rZW4tdDBnOTAiLCJrbWJldm5lbGVzLmlvL9NldnZpY2VhY2NvbW50L9NldnZpY2UtYWNju9VubC5uYW1lIjoiuWFuYWbluWVubC1hZG1puiIsImt1YmVyumV0ZXMuyW8vd2VybmljZWFjY291unQvd2VybmljZS1hY2NvbW50LnVpZCI6IjRiZDM2MWQ1LWE1NDAtMTFlNy04YzI5LTUyNTQwMDliMmNkZCIsInN1YiI6InN5d9RluTpzZXJ2yWNlYWNju9VubDptYW5hZ2VtZW50LWluZnJhOm1humFnZW1lunQtYWRtyW4ifQ.B6sZLGD9O4vBu9MHwiG-C_4iEwjBXb7Af8BPw-LNlujDmHhOnQ-Oo4QxQKyj9edynfmDy2yutUyJ2Mm9HfDGWg4C9xhWImHoq6Nl7T5_9djkeGKkK7Ejvg4fA-IkrzEsZeQuluBvXnE6wvP0LCjUo_dx4pPyZJyp46teV9NqKQeDzeysjlMCyqp6AK6-Lj8ILG8YA6d_97HlzL_EgFBLAu0lBSn-uC_9J0gLysqBtK6TI0nExfhv9Bm1_5bdHEbKHPW7xIlYlI9AgmyTyhsQ6SoQWtL2khBjkG9TlPBq9wYJj9bzqgVZlqEfICZxgtXO7sYyuoje4y8lo0YQ0kZmig -``` +cause errors**. If you do receive an error during the `Ensure the CFME +App is created` task, we recommend running the +[uninstall scripts](#uninstall) first before running the installer +again. -* In the CFME/MIQ web interface, navigate to `Compute` → - `Containers` → `Providers` and select `⚙ Configuration` → `⊕ - Add a new Containers Provider` +### Beta -*See the [upstream documentation](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/index.html#containers-providers) for additional information.* +Only required for enterprise +(`openshift_deployment_type=openshift-enterprise`) users: +* `openshift_management_install_beta` - by setting this value to + `true` you acknowledge that this software is currently in BETA and + support may be limited nonexistent. This is required to begin the + installation. # Requirements @@ -140,11 +134,14 @@ used in your Ansible inventory to control the behavior of this installer. -| Variable | Required | Default | Description | -|------------------------------------------------|:--------:|:------------------------------:|-------------------------------------| -| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. | +| Variable | Required | Default | Description | +|------------------------------------------------------|:--------:|:------------------------------:|-------------------------------------| +| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. | | `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. 
| -| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application | +| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application | +| `openshift_management_install_beta` | **No** | `false` | Boolean, by setting this value to `true` you acknowledge that this software is currently in BETA and support may be limited. Only required for *openshift-enterprise* users. | +| `openshift_management_username` | **No** | `admin` | Default management username. Changing this value **does not change the username**. Only change this value if you have changed the name already and are running integration scripts (such as the [add container provider](#container-provider) script) | +| `openshift_management_password` | **No** | `smartvm` | Default management password. Changing this value **does not change the password**. Only change this value if you have changed the password already and are running integration scripts (such as the [add-container-provider](#container-provider) script) | | **PRODUCT CHOICE** | | | | | | `openshift_management_app_template` | **No** | `miq-template` | The project flavor to install. Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> | | **STORAGE CLASSES** | | | | | @@ -268,6 +265,9 @@ openshift_management_app_template=cfme-template-ext-db openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'} ``` +**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be +able to deploy the app successfully. + # Limitations This release is the first OpenShift CFME release in the OCP 3.7 @@ -318,7 +318,10 @@ inventory. The following keys are required: * `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`* * `DATABASE_NAME` -Your inventory would contain a line similar to this: +**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be +able to deploy the app successfully.
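As a quick pre-flight sanity check before writing the inventory entries shown next, you can confirm the external-database parameter hash carries every required key (hypothetical helper, not shipped with the role):

```python
# Hypothetical helper, not part of openshift-ansible.
REQUIRED_EXT_DB_KEYS = {
    "DATABASE_USER", "DATABASE_PASSWORD", "DATABASE_IP",
    "DATABASE_PORT", "DATABASE_NAME",
}

def missing_ext_db_keys(template_parameters):
    """Return any required external-DB keys absent from the parameter hash."""
    return sorted(REQUIRED_EXT_DB_KEYS - set(template_parameters))


params = {"DATABASE_USER": "root", "DATABASE_IP": "10.10.10.10"}
print(missing_ext_db_keys(params))
# ['DATABASE_NAME', 'DATABASE_PASSWORD', 'DATABASE_PORT']
```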
+ +Your inventory would contain lines similar to this: ```ini [OSEv3:vars] @@ -336,7 +339,11 @@ At run time you may run into errors similar to this: TASK [openshift_management : Ensure the CFME App is created] *********************************** task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74 Tuesday 03 October 2017 15:30:44 -0400 (0:00:00.056) 0:00:12.278 ******* -{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char 'f'\n", "stdout": ""} +{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management", + "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server + (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version + \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char + 'f'\n", "stdout": ""} ``` Or like this: @@ -346,7 +353,10 @@ TASK [openshift_management : Ensure the CFME App is created] ******************* task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74 Tuesday 03 October 2017 16:05:36 -0400 (0:00:00.052) 0:00:18.948 ******* fatal: [m01.example.com]: FAILED! => {"changed": true, "failed": true, "msg": -{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\" is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP address, (e.g. 10.9.8.7)\n", "stdout": ""}, +{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind": + "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\" + is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP + address, (e.g. 10.9.8.7)\n", "stdout": ""}, ``` While intimidating at first, there are useful bits of information in @@ -453,6 +463,116 @@ hash. This applies to **CloudForms** installations as well: [cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml), [cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml). +# Container Provider + +There are two methods for enabling container provider integration. You +can manually add OCP/Origin as a container provider, or you can try +the playbooks included with this role. + +## Manually + +See the online documentation for steps to manually add you cluster as +a container provider: + +* [Container Providers](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/#containers-providers) + +## Automatically + +Automated container provider integration can be accomplished using the +playbooks included with this role. + +This playbook will: + +1. Gather the necessary authentication secrets +1. Find the public routes to the Management app and the cluster API +1. 
Make a REST call to add this cluster as a container provider + + +``` +$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml +``` + +## Multiple Providers + +As well as providing playbooks to integrate your *current* container +platform into the management service, this role includes a **tech +preview** script which allows you to add multiple container platforms +as container providers in any arbitrary MIQ/CFME server. + +Using the multiple-provider script requires manual configuration and +setting an `EXTRA_VARS` parameter on the command-line. + + +1. Copy the + [container_providers.yml](files/examples/container_providers.yml) + example somewhere, such as `/tmp/cp.yml` +1. If you changed your CFME/MIQ name or password, update the + `hostname`, `user`, and `password` parameters in the + `management_server` key in the `container_providers.yml` file copy +1. Fill in an entry under the `container_providers` key for *each* OCP + or Origin cluster you want to add as container providers + +**Parameters Which MUST Be Configured:** + +* `auth_key` - This is the token of a service account which has admin capabilities on the cluster. +* `hostname` - This is the hostname that points to the cluster API. Each container provider must have a unique hostname. +* `name` - This is the name of the cluster as displayed in the management server container providers overview. This must be unique. + +*Note*: You can obtain the `auth_key` bearer token from your clusters + with this command: `oc serviceaccounts get-token -n management-infra + management-admin` + +**Parameters Which MAY Be Configured:** + +* `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443` +* `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time. + + +Let's see an example describing the following scenario: + +* You copied `files/examples/container_providers.yml` to `/tmp/cp.yml` +* You're adding two OCP clusters +* Your management server runs on `mgmt.example.com` + +You would customize `/tmp/cp.yml` as such: + +```yaml +--- +container_providers: + - connection_configurations: + - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "ocp-prod.example.com" + name: OCP Production + port: 8443 + type: "ManageIQ::Providers::Openshift::ContainerManager" + - connection_configurations: + - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "ocp-test.example.com" + name: OCP Testing + port: 8443 + type: "ManageIQ::Providers::Openshift::ContainerManager" +management_server: + hostname: "mgmt.example.com" + user: admin + password: b3tt3r_p4SSw0rd +``` + +Then you will run the many-container-providers integration script. You +**must** provide the path to the container providers configuration +file as an `EXTRA_VARS` parameter to `ansible-playbook`. Use the `-e` +(or `--extra-vars`) parameter to set `container_providers_config` to +the config file path. 
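Under the hood, both the single-provider playbook and the multi-provider script post a `connection_configurations` body to the management server's `/api/providers` endpoint (the playbook does this with Ansible's `uri` module, as shown later in this diff). A rough `requests`-based equivalent, with placeholder hostnames, token, and credentials purely for illustration; the actual `ansible-playbook` invocation follows below:

```python
import requests

# Placeholders only -- real values come from your cluster and inventory.
MGMT_HOST = "mgmt.example.com"
BEARER_TOKEN = "management-admin-token-here"

provider = {
    "connection_configurations": [{
        "authentication": {"auth_key": BEARER_TOKEN, "authtype": "bearer", "type": "AuthToken"},
        "endpoint": {"role": "default", "security_protocol": "ssl-without-validation", "verify_ssl": 0},
    }],
    "hostname": "ocp-prod.example.com",
    "name": "OCP Production",
    "port": 8443,
    "type": "ManageIQ::Providers::Openshift::ContainerManager",
}

response = requests.post(
    "https://{}/api/providers".format(MGMT_HOST),
    json=provider,
    auth=("admin", "smartvm"),  # openshift_management_username / _password defaults
    verify=False,               # mirrors validate_certs: no in the playbook task
)
response.raise_for_status()
print(response.json())
```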
+ +``` +$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \ + playbooks/byo/openshift-management/add_many_container_providers.yml +``` + +Afterwards you will find two new container providers in your +management service. Navigate to `Compute` → `Containers` → `Providers` +to see an overview. # Uninstall @@ -461,6 +581,40 @@ installation: * `playbooks/byo/openshift-management/uninstall.yml` +NFS export definitions and data stored on NFS exports are not +automatically removed. You are urged to manually erase any data from +old application or database deployments before attempting to +initialize a new deployment. + +Failure to erase old PostgreSQL data can result in cascading +errors. The postgres pod may enter a `crashloopbackoff` state. This +will block the management pod from ever starting. The cause of the +`crashloopbackoff` is due to incorrect file permissions on the +database NFS export created during a previous deployment. + +To continue, erase all data from the postgres export and delete the +pod (**not** the deployer pod). For example, if you have pods like +such: + +``` +# oc get pods +NAME READY STATUS RESTARTS AGE +httpd-1-cx7fk 1/1 Running 1 21h +manageiq-0 0/1 Running 1 21h +memcached-1-vkc7p 1/1 Running 1 21h +postgresql-1-deploy 1/1 Running 1 21h +postgresql-1-6w2t4 0/1 CrashLoopBackOff 1 21h +``` + +Then you would: + +1. Erase the data from the database NFS export +2. `oc delete postgresql-1-6w2t4` + +The postgres deployer pod will try to scale up a new postgres pod to +replace the one you deleted. Once the postgres pod is running the +manageiq pod will stop blocking and begin application initialization. + # Additional Information The upstream project, diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index ebb56313f..8ba65b386 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -77,6 +77,20 @@ openshift_management_storage_nfs_base_dir: /exports openshift_management_storage_nfs_local_hostname: false ###################################################################### +# DEFAULT ACCOUNT INFORMATION +###################################################################### +# These are the default values for the username and password of the +# management app. Changing these values in your inventory will not +# change your username or password. You should only need to change +# these values in your inventory if you already changed the actual +# name and password AND are trying to use integration scripts. 
+# +# For example, adding this cluster as a container provider, +# playbooks/byo/openshift-management/add_container_provider.yml +openshift_management_username: admin +openshift_management_password: smartvm + +###################################################################### # SCAFFOLDING - These are parameters we pre-seed that a user may or # may not set later ###################################################################### diff --git a/roles/openshift_management/files/examples/container_providers.yml b/roles/openshift_management/files/examples/container_providers.yml new file mode 100644 index 000000000..661f62e4d --- /dev/null +++ b/roles/openshift_management/files/examples/container_providers.yml @@ -0,0 +1,22 @@ +--- +container_providers: + - connection_configurations: + - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "OCP/Origin cluster hostname (providing API access)" + name: openshift-management + port: 8443 + type: "ManageIQ::Providers::Openshift::ContainerManager" +# Copy and update for as many OCP or Origin providers as you want to +# add to your management service + # - connection_configurations: + # - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken} + # endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + # hostname: "OCP/Origin cluster hostname (providing API access)" + # name: openshift-management + # port: 8443 + # type: "ManageIQ::Providers::Openshift::ContainerManager" +management_server: + hostname: "Management server hostname (providing API access)" + user: admin + password: smartvm diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py new file mode 100644 index 000000000..3b7013d9a --- /dev/null +++ b/roles/openshift_management/filter_plugins/oo_management_filters.py @@ -0,0 +1,32 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +""" +Filter methods for the management role +""" + + +def oo_filter_container_providers(results): + """results - the result from posting the API calls for adding new +providers""" + all_results = [] + for result in results: + if 'results' in result['json']: + # We got an OK response + res = result['json']['results'][0] + all_results.append("Provider '{}' - Added successfully".format(res['name'])) + elif 'error' in result['json']: + # This was a problem + all_results.append("Provider '{}' - Failed to add. 
Message: {}".format( + result['item']['name'], result['json']['error']['message'])) + return all_results + + +class FilterModule(object): + """ Custom ansible filter mapping """ + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + """ returns a mapping of filters to methods """ + return { + "oo_filter_container_providers": oo_filter_container_providers, + } diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml new file mode 100644 index 000000000..50a5252cc --- /dev/null +++ b/roles/openshift_management/tasks/add_container_provider.yml @@ -0,0 +1,77 @@ +--- +- name: Ensure lib_openshift modules are available + include_role: + role: lib_openshift + +- name: Ensure OpenShift facts module is available + include_role: + role: openshift_facts + +- name: Ensure OpenShift facts are loaded + openshift_facts: + +- name: Ensure we use openshift_master_cluster_public_hostname if it is available + set_fact: + l_cluster_hostname: "{{ openshift.master.cluster_public_hostname }}" + when: + - openshift.master.cluster_public_hostname is defined + +- name: Ensure we default to the first master if openshift_master_cluster_public_hostname is unavailable + set_fact: + l_cluster_hostname: "{{ openshift.master.cluster_hostname }}" + when: + - l_cluster_hostname is not defined + +- name: Ensure the management SA Secrets are read + oc_serviceaccount_secret: + state: list + service_account: management-admin + namespace: management-infra + register: sa + +- name: Ensure the management SA bearer token is identified + set_fact: + management_token: "{{ sa.results | oo_filter_sa_secrets }}" + +- name: Ensure the SA bearer token value is read + oc_secret: + state: list + name: "{{ management_token }}" + namespace: management-infra + decode: true + no_log: True + register: sa_secret + +- name: Ensure the SA bearer token value is saved + set_fact: + management_bearer_token: "{{ sa_secret.results.decoded.token }}" + +- name: Ensure we have the public route to the management service + oc_route: + state: list + name: httpd + namespace: openshift-management + register: route + +- name: Ensure the management service route is saved + set_fact: + management_route: "{{ route.results.0.spec.host }}" + +- name: Ensure this cluster is a container provider + uri: + url: "https://{{ management_route }}/api/providers" + body_format: json + method: POST + user: "{{ openshift_management_username }}" + password: "{{ openshift_management_password }}" + validate_certs: no + # Docs on formatting the BODY of the POST request: + # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations + body: + connection_configurations: + - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "{{ l_cluster_hostname }}" + name: "{{ openshift_management_project }}" + port: "{{ openshift.master.api_port }}" + type: "ManageIQ::Providers::Openshift::ContainerManager" diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index 86c4d0010..9be923a57 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -2,23 +2,33 @@ ######################################################################) # Users, projects, and privileges -- name: Run pre-install CFME validation checks +- name: 
Run pre-install Management validation checks include: validate.yml -- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists" +# This creates a service account allowing Container Provider +# integration (managing OCP/Origin via MIQ/Management) +- name: Enable Container Provider Integration + include_role: + role: openshift_manageiq + +- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists" oc_project: state: present name: "{{ openshift_management_project }}" display_name: "{{ openshift_management_project_description }}" -- name: Create and Authorize CFME Accounts +- name: Create and Authorize Management Accounts include: accounts.yml ###################################################################### # STORAGE - Initialize basic storage class +- name: Determine the correct NFS host if required + include: storage/nfs_server.yml + when: openshift_management_storage_class in ['nfs', 'nfs_external'] + #--------------------------------------------------------------------- # * nfs - set up NFS shares on the first master for a proof of concept -- name: Create required NFS exports for CFME app storage +- name: Create required NFS exports for Management app storage include: storage/nfs.yml when: openshift_management_storage_class == 'nfs' @@ -45,7 +55,7 @@ ###################################################################### # APPLICATION TEMPLATE -- name: Install the CFME app and PV templates +- name: Install the Management app and PV templates include: template.yml ###################################################################### @@ -71,9 +81,16 @@ when: - openshift_management_app_template in ['miq-template', 'cfme-template'] -- name: Ensure the CFME App is created +- name: Ensure the Management App is created oc_process: namespace: "{{ openshift_management_project }}" template_name: "{{ openshift_management_template_name }}" create: True params: "{{ openshift_management_template_parameters }}" + +- name: Wait for the app to come up. 
May take several minutes, 30s check intervals, 10m max + command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}" + register: app_seeding_logs + until: app_seeding_logs.stdout.find('Server starting complete') != -1 + delay: 30 + retries: 20 diff --git a/roles/openshift_management/tasks/noop.yml b/roles/openshift_management/tasks/noop.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/openshift_management/tasks/noop.yml @@ -0,0 +1 @@ +--- diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml index 31c845725..d1b9a8d5c 100644 --- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -26,7 +26,7 @@ when: - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined -- name: Check if the CFME App PV has been created +- name: Check if the Management App PV has been created oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -34,7 +34,7 @@ name: "{{ openshift_management_flavor_short }}-app" register: miq_app_pv_check -- name: Check if the CFME DB PV has been created +- name: Check if the Management DB PV has been created oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -44,7 +44,7 @@ when: - openshift_management_app_template in ['miq-template', 'cfme-template'] -- name: Ensure the CFME App PV is created +- name: Ensure the Management App PV is created oc_process: namespace: "{{ openshift_management_project }}" template_name: "{{ openshift_management_flavor }}-app-pv" @@ -55,7 +55,7 @@ NFS_HOST: "{{ openshift_management_nfs_server }}" when: miq_app_pv_check.results.results == [{}] -- name: Ensure the CFME DB PV is created +- name: Ensure the Management DB PV is created oc_process: namespace: "{{ openshift_management_project }}" template_name: "{{ openshift_management_flavor }}-db-pv" diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml index 696808328..94e11137c 100644 --- a/roles/openshift_management/tasks/storage/nfs.yml +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -2,37 +2,6 @@ # Tasks to statically provision NFS volumes # Include if not using dynamic volume provisioning -- name: Ensure we save the local NFS server if one is provided - set_fact: - openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}" - when: - - openshift_management_storage_nfs_local_hostname is defined - - openshift_management_storage_nfs_local_hostname != False - - openshift_management_storage_class == "nfs" - -- name: Ensure we save the local NFS server - set_fact: - openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}" - when: - - openshift_management_nfs_server is not defined - - openshift_management_storage_class == "nfs" - -- name: Ensure we save the external NFS server - set_fact: - openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}" - when: - - openshift_management_storage_class == "nfs_external" - -- name: Failed NFS server detection - assert: - that: - - openshift_management_nfs_server is defined - msg: | - "Unable to detect an NFS server. The 'nfs_external' - openshift_management_storage_class option requires that you set - openshift_management_storage_nfs_external_hostname. 
NFS hosts detected - for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}" - - name: Setting up NFS storage block: - name: Include the NFS Setup role tasks diff --git a/roles/openshift_management/tasks/storage/nfs_server.yml b/roles/openshift_management/tasks/storage/nfs_server.yml new file mode 100644 index 000000000..a1b618137 --- /dev/null +++ b/roles/openshift_management/tasks/storage/nfs_server.yml @@ -0,0 +1,45 @@ +--- +- name: Ensure we save the local NFS server if one is provided + set_fact: + openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}" + when: + - openshift_management_storage_nfs_local_hostname is defined + - openshift_management_storage_nfs_local_hostname != False + - openshift_management_storage_class == "nfs" + +- name: Ensure we save the local NFS server + set_fact: + openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}" + when: + - openshift_management_nfs_server is not defined + - openshift_management_storage_class == "nfs" + +- name: Ensure we save the external NFS server + set_fact: + openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}" + when: + - openshift_management_storage_class == "nfs_external" + +- name: Failed External NFS server detection + assert: + that: + - openshift_management_nfs_server is defined + msg: | + Unable to detect an NFS server. The 'nfs_external' + openshift_management_storage_class option requires that you + manually set openshift_management_storage_nfs_external_hostname + parameter. + when: + - openshift_management_storage_class == 'nfs_external' + +- name: Failed Local NFS server detection + assert: + that: + - openshift_management_nfs_server is defined + msg: | + Unable to detect an NFS server. The 'nfs' + openshift_management_storage_class option requires that you have + an 'nfs' inventory group or manually set the + openshift_management_storage_nfs_local_hostname parameter. 
+ when: + - openshift_management_storage_class == 'nfs' diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml index 299158ac4..9f97cdcb9 100644 --- a/roles/openshift_management/tasks/template.yml +++ b/roles/openshift_management/tasks/template.yml @@ -15,7 +15,7 @@ # STANDARD PODIFIED DATABASE TEMPLATE - when: openshift_management_app_template in ['miq-template', 'cfme-template'] block: - - name: Check if the CFME Server template has been created already + - name: Check if the Management Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -25,12 +25,12 @@ - when: miq_server_check.results.results == [{}] block: - - name: Copy over CFME Server template + - name: Copy over Management Server template copy: src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml" dest: "{{ template_dir }}/" - - name: Ensure CFME Server Template is created + - name: Ensure Management Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" name: "{{ openshift_management_flavor }}" @@ -41,9 +41,9 @@ ###################################################################### # EXTERNAL DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template'] +- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] block: - - name: Check if the CFME Ext-DB Server template has been created already + - name: Check if the Management Ext-DB Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -53,12 +53,12 @@ - when: miq_ext_db_server_check.results.results == [{}] block: - - name: Copy over CFME Ext-DB Server template + - name: Copy over Management Ext-DB Server template copy: src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml" dest: "{{ template_dir }}/" - - name: Ensure CFME Ext-DB Server Template is created + - name: Ensure Management Ext-DB Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" name: "{{ openshift_management_flavor }}-ext-db" @@ -74,7 +74,7 @@ # Begin conditional PV template creations # Required for the application server -- name: Check if the CFME App PV template has been created already +- name: Check if the Management App PV template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -84,12 +84,12 @@ - when: miq_app_pv_check.results.results == [{}] block: - - name: Copy over CFME App PV template + - name: Copy over Management App PV template copy: src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" dest: "{{ template_dir }}/" - - name: Ensure CFME App PV Template is created + - name: Ensure Management App PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" name: "{{ openshift_management_flavor }}-app-pv" @@ -103,7 +103,7 @@ # Required for database if the installation is fully podified - when: openshift_management_app_template in ['miq-template', 'cfme-template'] block: - - name: Check if the CFME DB PV template has been created already + - name: Check if the Management DB PV template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -113,12 +113,12 @@ - when: miq_db_pv_check.results.results == [{}] block: - - name: Copy over CFME 
DB PV template + - name: Copy over Management DB PV template copy: src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" dest: "{{ template_dir }}/" - - name: Ensure CFME DB PV Template is created + - name: Ensure Management DB PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" name: "{{ openshift_management_flavor }}-db-pv" diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml index 8b20bdc5e..b22f36a4f 100644 --- a/roles/openshift_management/tasks/validate.yml +++ b/roles/openshift_management/tasks/validate.yml @@ -2,12 +2,25 @@ # Validate configuration parameters passed to the openshift_management role ###################################################################### +# BETA ACKNOWLEDGEMENT +- name: Ensure BETA software notice has been acknowledged + assert: + that: + - openshift_management_install_beta | default(false) | bool + msg: | + openshift-management (CFME/MIQ) is currently BETA status. You + must set openshift_management_install_beta to true to + acknowledge that you accept this risk and understand that + support is limited or nonexistent. + when: + - openshift_deployment_type == 'openshift-enterprise' + +###################################################################### # CORE PARAMETERS - name: Ensure openshift_management_app_template is valid assert: that: - openshift_management_app_template in __openshift_management_app_templates - msg: | "openshift_management_app_template must be one of {{ __openshift_management_app_templates | join(', ') }}" diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 3da861d03..fe78dea66 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -12,7 +12,7 @@ r_openshift_master_clean_install: false r_openshift_master_etcd3_storage: false r_openshift_master_os_firewall_enable: true r_openshift_master_os_firewall_deny: [] -r_openshift_master_os_firewall_allow: +default_r_openshift_master_os_firewall_allow: - service: api server https port: "{{ openshift.master.api_port }}/tcp" - service: api controllers https @@ -24,6 +24,8 @@ r_openshift_master_os_firewall_allow: - service: etcd embedded port: 4001/tcp cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" +r_openshift_master_os_firewall_allow: "{{ default_r_openshift_master_os_firewall_allow | union(openshift_master_open_ports | default([])) }}" + # oreg_url is defined by user input oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' 
in oreg_url.split('/')[0]) else '' }}" @@ -62,7 +64,7 @@ openshift_master_config_dir_default: "{{ (openshift.common.config_base | default openshift_master_config_dir: "{{ openshift_master_config_dir_default }}" openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}" -openshift_master_node_config_networkconfig_mtu: 1450 +openshift_master_node_config_networkconfig_mtu: "{{ openshift_node_sdn_mtu | default(1450) }}" openshift_master_node_config_kubeletargs_cpu: 500m openshift_master_node_config_kubeletargs_mem: 512M @@ -71,7 +73,7 @@ openshift_master_bootstrap_enabled: False openshift_master_client_binary: "{{ openshift.common.client_binary if openshift is defined else 'oc' }}" -openshift_master_config_imageconfig_format: "{{ oreg_url if oreg_url != '' else 'registry.access.redhat.com/openshift3/ose-${component}:${version}' }}" +openshift_master_config_imageconfig_format: "{{ openshift.node.registry_url }}" # these are for the default settings in a generated node-config.yaml openshift_master_node_config_default_edits: @@ -101,7 +103,7 @@ openshift_master_node_config_default_edits: value: - 'true' - key: networkConfig.mtu - value: 8951 + value: "{{ openshift_master_node_config_networkconfig_mtu }}" - key: networkConfig.networkPluginName value: "{{ r_openshift_master_sdn_network_plugin_name }}" - key: networkPluginName diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index d5094c2c9..f88c4a7dc 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -9,10 +9,13 @@ notify: - Verify API Server +# We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers - systemd: - name: "{{ openshift.common.service_type }}-master-controllers" - state: restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 when: - not (master_controllers_service_status_changed | default(false) | bool) - openshift.master.cluster_method == 'native' diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml index eee89743c..f837a8bae 100644 --- a/roles/openshift_master/tasks/bootstrap.yml +++ b/roles/openshift_master/tasks/bootstrap.yml @@ -1,26 +1,12 @@ --- - -- name: ensure the node-bootstrap service account exists - oc_serviceaccount: - name: node-bootstrapper - namespace: openshift-infra - state: present - run_once: true - -- name: grant node-bootstrapper the correct permissions to bootstrap - oc_adm_policy_user: - namespace: openshift-infra - user: system:serviceaccount:openshift-infra:node-bootstrapper - resource_kind: cluster-role - resource_name: system:node-bootstrapper - state: present - run_once: true - # TODO: create a module for this command. 
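Back in the master defaults, the firewall allow list is now the union of the role's built-in rules and any user-supplied `openshift_master_open_ports` entries. A small Python analogue of what the Jinja2 `union` filter produces (service names and ports here are examples only):

```python
# Example rules only; the real defaults come from the role and the inventory.
default_allow = [
    {"service": "api server https", "port": "8443/tcp"},
    {"service": "api controllers https", "port": "8444/tcp"},
]
openshift_master_open_ports = [
    {"service": "prometheus metrics", "port": "9090/tcp"},  # extra user rule
    {"service": "api server https", "port": "8443/tcp"},    # duplicate, kept once
]

# union keeps the defaults and appends any user rules not already present
effective_allow = list(default_allow)
effective_allow.extend(rule for rule in openshift_master_open_ports
                       if rule not in effective_allow)
print(effective_allow)
```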
# oc_serviceaccounts_kubeconfig - name: create service account kubeconfig with csr rights command: "oc serviceaccounts create-kubeconfig node-bootstrapper -n openshift-infra" register: kubeconfig_out + until: kubeconfig_out.rc == 0 + retries: 24 + delay: 5 - name: put service account kubeconfig into a file on disk for bootstrap copy: @@ -42,6 +28,7 @@ --node-dir={{ mktempout.stdout }}/ --node=CONFIGMAP --hostnames=test + --dns-ip=0.0.0.0 --certificate-authority={{ openshift_master_config_dir }}/ca.crt --signer-cert={{ openshift_master_config_dir }}/ca.crt --signer-key={{ openshift_master_config_dir }}/ca.key diff --git a/roles/openshift_master/tasks/clean_systemd_units.yml b/roles/openshift_master/tasks/clean_systemd_units.yml deleted file mode 100644 index e641f84d4..000000000 --- a/roles/openshift_master/tasks/clean_systemd_units.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- - -- name: Disable master service - systemd: - name: "{{ openshift.common.service_type }}-master" - state: stopped - enabled: no - masked: yes - ignore_errors: true diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml index f79955e95..a16cbe78e 100644 --- a/roles/openshift_master/tasks/journald.yml +++ b/roles/openshift_master/tasks/journald.yml @@ -3,6 +3,11 @@ stat: path=/etc/systemd/journald.conf register: journald_conf_file +- name: Create journald persistence directories + file: + path: /var/log/journal + state: directory + - name: Update journald setup replace: dest: /etc/systemd/journald.conf @@ -16,7 +21,9 @@ # I need to restart journald immediatelly, otherwise it gets into way during # further steps in ansible - name: Restart journald - systemd: - name: systemd-journald - state: restarted + command: "systemctl restart systemd-journald" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 when: journald_update | changed diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml index 63d483760..cde01c49e 100644 --- a/roles/openshift_master/tasks/registry_auth.yml +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -11,6 +11,9 @@ - oreg_auth_user is defined - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool register: master_oreg_auth_credentials_create + retries: 3 + delay: 5 + until: master_oreg_auth_credentials_create.rc == 0 notify: - restart master api - restart master controllers diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index fcc66044b..8420dfb8c 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -14,8 +14,22 @@ - include: registry_auth.yml +- name: Disable the legacy master service if it exists + systemd: + name: "{{ openshift.common.service_type }}-master" + state: stopped + enabled: no + masked: yes + ignore_errors: true + - name: Remove the legacy master service if it exists - include: clean_systemd_units.yml + file: + path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service" + state: absent + ignore_errors: true + when: + - openshift.master.cluster_method == "native" + - not openshift.common.is_master_system_container | bool # This is the image used for both HA and non-HA clusters: - name: Pre-pull master image @@ -44,6 +58,17 @@ - l_create_ha_unit_files | changed # end workaround for missing systemd unit files +- name: enable master services + systemd: + name: "{{ openshift.common.service_type 
}}-master-{{ item }}" + enabled: yes + with_items: + - api + - controllers + when: + - openshift.master.cluster_method == "native" + - not openshift.common.is_master_system_container | bool + - name: Preserve Master API Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api register: l_master_api_proxy diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 40775571f..c83fc9fbb 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -58,11 +58,12 @@ controllerConfig: {% endif %} controllers: '*' corsAllowedOrigins: + # anchor with start (\A) and end (\z) of the string, make the check case insensitive ((?i)) and escape hostname {% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %} - - {{ origin }} + - (?i)\A{{ origin | regex_escape() }}\z {% endfor %} {% for custom_origin in openshift.master.custom_cors_origins | default("") %} - - {{ custom_origin }} + - (?i)\A{{ custom_origin | regex_escape() }}\z {% endfor %} {% if 'disabled_features' in openshift.master %} disabledFeatures: {{ openshift.master.disabled_features | to_json }} @@ -179,6 +180,11 @@ masterPublicURL: {{ openshift.master.public_api_url }} networkConfig: clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }} hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} +{% if openshift.common.version_gte_3_7 | bool %} + clusterNetworks: + - cidr: {{ openshift.master.sdn_cluster_network_cidr }} + hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} +{% endif %} {% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %} networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }} {% endif %} diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py index a4f410296..97a5179e0 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py @@ -326,10 +326,8 @@ class IdentityProviderOauthBase(IdentityProviderBase): self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']] def validate(self): - ''' validate this idp instance ''' - if self.challenge: - raise errors.AnsibleFilterError("|failed provider {0} does not " - "allow challenge authentication".format(self.__class__.__name__)) + ''' validate an instance of this idp class ''' + pass class OpenIDIdentityProvider(IdentityProviderOauthBase): @@ -428,6 +426,12 @@ class GoogleIdentityProvider(IdentityProviderOauthBase): IdentityProviderOauthBase.__init__(self, api_version, idp) self._optional += [['hostedDomain', 'hosted_domain']] + def validate(self): + ''' validate this idp instance ''' + if self.challenge: + raise errors.AnsibleFilterError("|failed provider {0} does not " + "allow challenge authentication".format(self.__class__.__name__)) + class GitHubIdentityProvider(IdentityProviderOauthBase): """ GitHubIdentityProvider @@ -446,6 +450,12 @@ class GitHubIdentityProvider(IdentityProviderOauthBase): self._optional += [['organizations'], ['teams']] + def validate(self): + ''' validate this idp instance ''' + if self.challenge: + raise 
errors.AnsibleFilterError("|failed provider {0} does not " + "allow challenge authentication".format(self.__class__.__name__)) + class FilterModule(object): ''' Custom ansible filters for use by the openshift_master role''' @@ -510,7 +520,7 @@ class FilterModule(object): 'master.kubelet-client.crt', 'master.kubelet-client.key'] if bool(include_ca): - certs += ['ca.crt', 'ca.key', 'ca-bundle.crt'] + certs += ['ca.crt', 'ca.key', 'ca-bundle.crt', 'client-ca-bundle.crt'] if bool(include_keys): certs += ['serviceaccounts.private.key', 'serviceaccounts.public.key'] diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 501be148e..cf0be3bef 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -88,7 +88,6 @@ controller_args: "{{ osm_controller_args | default(None) }}" disabled_features: "{{ osm_disabled_features | default(None) }}" master_count: "{{ openshift_master_count | default(None) }}" - controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}" master_image: "{{ osm_image | default(None) }}" admission_plugin_config: "{{openshift_master_admission_plugin_config }}" kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml index ce7688581..88b893448 100644 --- a/roles/openshift_metrics/handlers/main.yml +++ b/roles/openshift_metrics/handlers/main.yml @@ -4,8 +4,13 @@ when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' notify: Verify API Server +# We retry the controllers because the API may not be 100% initialized yet. 
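The hunk that follows applies the same retry-until restart already added to the master handlers. As a standalone sketch of the pattern, reusing the journald restart shown earlier so it can run on most systemd hosts:

```yaml
# Minimal sketch of the retry-until restart pattern used by these handlers
# (the journald task above uses the same shape).
- hosts: localhost
  gather_facts: false
  become: true
  tasks:
    - name: Restart a unit, retrying in case it is not ready yet
      command: "systemctl restart systemd-journald"
      register: result
      retries: 3
      delay: 5
      until: result.rc == 0
```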
- name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' - name: Verify API Server diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml index 39323904f..b0b888d56 100644 --- a/roles/openshift_nfs/tasks/create_export.yml +++ b/roles/openshift_nfs/tasks/create_export.yml @@ -12,7 +12,7 @@ # l_nfs_export_name: Name of sub-directory of the export # l_nfs_options: Mount Options -- name: Ensure CFME App NFS export directory exists +- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists" file: path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}" state: directory diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index b9f16dfd4..0c6d8db38 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -9,7 +9,7 @@ openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' el openshift_image_tag: '' -openshift_node_ami_prep_packages: +default_r_openshift_node_image_prep_packages: - "{{ openshift_service_type }}-master" - "{{ openshift_service_type }}-node" - "{{ openshift_service_type }}-docker-excluder" @@ -33,7 +33,6 @@ openshift_node_ami_prep_packages: - python-dbus - PyYAML - yum-utils -- cloud-utils-growpart # gluster - glusterfs-fuse # nfs @@ -54,6 +53,7 @@ openshift_node_ami_prep_packages: # - container-selinux # - atomic # +r_openshift_node_image_prep_packages: "{{ default_r_openshift_node_image_prep_packages | union(openshift_node_image_prep_packages | default([])) }}" openshift_node_bootstrap: False @@ -110,5 +110,8 @@ openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}" openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}" openshift_node_data_dir: "{{ openshift_node_data_dir_default }}" +openshift_node_config_dir_default: "/etc/origin/node" +openshift_node_config_dir: "{{ openshift_node_config_dir_default }}" + openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}" openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}" diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml index 8c03f6c41..8cf41ab4c 100644 --- a/roles/openshift_node/tasks/bootstrap.yml +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -3,7 +3,7 @@ package: name: "{{ item }}" state: present - with_items: "{{ openshift_node_ami_prep_packages }}" + with_items: "{{ r_openshift_node_image_prep_packages }}" - name: create the directory for node file: @@ -25,11 +25,11 @@ state: "{{ item.state | default('present') }}" with_items: # add the kubeconfig - - line: "KUBECONFIG=/etc/origin/node/csr_kubeconfig" + - line: "KUBECONFIG={{ openshift_node_config_dir }}/bootstrap.kubeconfig" regexp: "^KUBECONFIG=.*" # remove the config file. 
This comes from openshift_facts - - regexp: "^CONFIG_FILE=.*" - state: absent + - line: "CONFIG_FILE={{ openshift_node_config_dir }}/node-config.yaml" + regexp: "^CONFIG_FILE=.*" - name: include aws sysconfig credentials include: aws.yml @@ -76,7 +76,7 @@ state: link force: yes with_items: - - /var/lib/origin/openshift.local.config/node/node-client-ca.crt + - "{{ openshift_node_config_dir }}/node-client-ca.crt" - when: rpmgenerated_config.stat.exists block: diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml index de396fb4b..5e5e4f94a 100644 --- a/roles/openshift_node/tasks/registry_auth.yml +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -11,6 +11,9 @@ - oreg_auth_user is defined - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool register: node_oreg_auth_credentials_create + retries: 3 + delay: 5 + until: node_oreg_auth_credentials_create.rc == 0 notify: - restart node diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml index eae832fcf..ebcff46b5 100644 --- a/roles/openshift_node_dnsmasq/defaults/main.yml +++ b/roles/openshift_node_dnsmasq/defaults/main.yml @@ -1,2 +1,7 @@ --- openshift_node_dnsmasq_install_network_manager_hook: true + +# lo must always be present in this list or dnsmasq will conflict with +# the node's dns service. +openshift_node_dnsmasq_except_interfaces: +- lo diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh index 230f0a28c..f4e48b5b7 100755 --- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh +++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh @@ -54,6 +54,8 @@ domain-needed server=/cluster.local/172.30.0.1 server=/30.172.in-addr.arpa/172.30.0.1 enable-dbus +dns-forward-max=5000 +cache-size=5000 EOF # New config file, must restart NEEDS_RESTART=1 diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 index ef3ba2880..6543c7c3e 100644 --- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 +++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 @@ -3,5 +3,10 @@ domain-needed no-negcache max-cache-ttl=1 enable-dbus -bind-interfaces -listen-address={{ openshift.node.dns_ip }} +dns-forward-max=5000 +cache-size=5000 +bind-dynamic +{% for interface in openshift_node_dnsmasq_except_interfaces %} +except-interface={{ interface }} +{% endfor %} +# End of config diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml index de396fb4b..5e5e4f94a 100644 --- a/roles/openshift_node_upgrade/tasks/registry_auth.yml +++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml @@ -11,6 +11,9 @@ - oreg_auth_user is defined - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool register: node_oreg_auth_credentials_create + retries: 3 + delay: 5 + until: node_oreg_auth_credentials_create.rc == 0 notify: - restart node diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index 00995eee6..d217b90fb 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -6,16 +6,6 @@ openshift_prometheus_namespace: prometheus openshift_prometheus_node_selector: {"region":"infra"} -# image defaults 
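A note on the dnsmasq change above: the node role now emits one `except-interface=` line per entry in `openshift_node_dnsmasq_except_interfaces`, and `lo` must remain in that list or dnsmasq will conflict with the node's own DNS service. An illustrative vars-file override that keeps `lo` while excluding one more interface; the second interface name is a placeholder:

```yaml
# group_vars sketch: keep the mandatory "lo" entry and exclude one extra interface.
# "eth1" is a placeholder; substitute whichever interface dnsmasq should not bind.
openshift_node_dnsmasq_except_interfaces:
- lo
- eth1
```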
-openshift_prometheus_image_prefix: "openshift/" -openshift_prometheus_image_version: "v2.0.0-dev.3" -openshift_prometheus_proxy_image_prefix: "openshift/" -openshift_prometheus_proxy_image_version: "v1.0.0" -openshift_prometheus_alertmanager_image_prefix: "openshift/" -openshift_prometheus_alertmanager_image_version: "v0.9.1" -openshift_prometheus_alertbuffer_image_prefix: "openshift/" -openshift_prometheus_alertbuffer_image_version: "v0.0.2" - # additional prometheus rules file openshift_prometheus_additional_rules_file: null diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml index 523a64334..5cc9a67eb 100644 --- a/roles/openshift_prometheus/tasks/main.yaml +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -1,4 +1,9 @@ --- +- name: Set default image variables based on deployment_type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type }}.yml" + - "default_images.yml" - name: Create temp directory for doing work in on target command: mktemp -td openshift-prometheus-ansible-XXXXXX diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2 index 916c57aa2..456db3a57 100644 --- a/roles/openshift_prometheus/templates/prometheus.j2 +++ b/roles/openshift_prometheus/templates/prometheus.j2 @@ -23,28 +23,28 @@ spec: {% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %} nodeSelector: {% for key, value in openshift_prometheus_node_selector.iteritems() %} - {{key}}: "{{value}}" + {{ key }}: "{{ value }}" {% endfor %} {% endif %} containers: # Deploy Prometheus behind an oauth proxy - name: prom-proxy - image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}" + image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}" imagePullPolicy: IfNotPresent resources: requests: {% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %} - memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}" + memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}" {% endif %} {% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %} - cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}" + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}" {% endif %} limits: {% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} - memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}" + memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}" {% endif %} {% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %} - cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}" + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" {% endif %} ports: - containerPort: 8443 @@ -79,22 +79,22 @@ spec: - --storage.tsdb.min-block-duration=2m - --config.file=/etc/prometheus/prometheus.yml - --web.listen-address=localhost:9090 - image: "{{openshift_prometheus_image_prefix}}prometheus:{{openshift_prometheus_image_version}}" + image: "{{ l_openshift_prometheus_image_prefix }}prometheus:{{ l_openshift_prometheus_image_version }}" imagePullPolicy: IfNotPresent resources: requests: {% if openshift_prometheus_memory_requests is defined and 
openshift_prometheus_memory_requests is not none %} - memory: "{{openshift_prometheus_memory_requests}}" + memory: "{{ openshift_prometheus_memory_requests }}" {% endif %} {% if openshift_prometheus_cpu_requests is defined and openshift_prometheus_cpu_requests is not none %} - cpu: "{{openshift_prometheus_cpu_requests}}" + cpu: "{{ openshift_prometheus_cpu_requests }}" {% endif %} limits: {% if openshift_prometheus_memory_limit is defined and openshift_prometheus_memory_limit is not none %} memory: "{{ openshift_prometheus_memory_limit }}" {% endif %} {% if openshift_prometheus_cpu_limit is defined and openshift_prometheus_cpu_limit is not none %} - cpu: "{{openshift_prometheus_cpu_limit}}" + cpu: "{{ openshift_prometheus_cpu_limit }}" {% endif %} volumeMounts: @@ -105,22 +105,22 @@ spec: # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy - name: alerts-proxy - image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}" + image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}" imagePullPolicy: IfNotPresent resources: requests: {% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %} - memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}" + memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}" {% endif %} {% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %} - cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}" + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}" {% endif %} limits: {% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} - memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}" + memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}" {% endif %} {% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %} - cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}" + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" {% endif %} ports: - containerPort: 9443 @@ -149,22 +149,22 @@ spec: - name: alert-buffer args: - --storage-path=/alert-buffer/messages.db - image: "{{openshift_prometheus_alertbuffer_image_prefix}}prometheus-alert-buffer:{{openshift_prometheus_alertbuffer_image_version}}" + image: "{{ l_openshift_prometheus_alertbuffer_image_prefix }}prometheus-alert-buffer:{{ l_openshift_prometheus_alertbuffer_image_version }}" imagePullPolicy: IfNotPresent resources: requests: {% if openshift_prometheus_alertbuffer_memory_requests is defined and openshift_prometheus_alertbuffer_memory_requests is not none %} - memory: "{{openshift_prometheus_alertbuffer_memory_requests}}" + memory: "{{ openshift_prometheus_alertbuffer_memory_requests }}" {% endif %} {% if openshift_prometheus_alertbuffer_cpu_requests is defined and openshift_prometheus_alertbuffer_cpu_requests is not none %} - cpu: "{{openshift_prometheus_alertbuffer_cpu_requests}}" + cpu: "{{ openshift_prometheus_alertbuffer_cpu_requests }}" {% endif %} limits: {% if openshift_prometheus_alertbuffer_memory_limit is defined and openshift_prometheus_alertbuffer_memory_limit is not none %} - memory: "{{openshift_prometheus_alertbuffer_memory_limit}}" + memory: "{{ openshift_prometheus_alertbuffer_memory_limit }}" {% endif %} {% if openshift_prometheus_alertbuffer_cpu_limit 
is defined and openshift_prometheus_alertbuffer_cpu_limit is not none %} - cpu: "{{openshift_prometheus_alertbuffer_cpu_limit}}" + cpu: "{{ openshift_prometheus_alertbuffer_cpu_limit }}" {% endif %} volumeMounts: - mountPath: /alert-buffer @@ -176,22 +176,22 @@ spec: - name: alertmanager args: - -config.file=/etc/alertmanager/alertmanager.yml - image: "{{openshift_prometheus_alertmanager_image_prefix}}prometheus-alertmanager:{{openshift_prometheus_alertmanager_image_version}}" + image: "{{ l_openshift_prometheus_alertmanager_image_prefix }}prometheus-alertmanager:{{ l_openshift_prometheus_alertmanager_image_version }}" imagePullPolicy: IfNotPresent resources: requests: {% if openshift_prometheus_alertmanager_memory_requests is defined and openshift_prometheus_alertmanager_memory_requests is not none %} - memory: "{{openshift_prometheus_alertmanager_memory_requests}}" + memory: "{{ openshift_prometheus_alertmanager_memory_requests }}" {% endif %} {% if openshift_prometheus_alertmanager_cpu_requests is defined and openshift_prometheus_alertmanager_cpu_requests is not none %} - cpu: "{{openshift_prometheus_alertmanager_cpu_requests}}" + cpu: "{{ openshift_prometheus_alertmanager_cpu_requests }}" {% endif %} limits: {% if openshift_prometheus_alertmanager_memory_limit is defined and openshift_prometheus_alertmanager_memory_limit is not none %} - memory: "{{openshift_prometheus_alertmanager_memory_limit}}" + memory: "{{ openshift_prometheus_alertmanager_memory_limit }}" {% endif %} {% if openshift_prometheus_alertmanager_cpu_limit is defined and openshift_prometheus_alertmanager_cpu_limit is not none %} - cpu: "{{openshift_prometheus_alertmanager_cpu_limit}}" + cpu: "{{ openshift_prometheus_alertmanager_cpu_limit }}" {% endif %} ports: - containerPort: 9093 diff --git a/roles/openshift_prometheus/vars/default_images.yml b/roles/openshift_prometheus/vars/default_images.yml new file mode 100644 index 000000000..ad52a3125 --- /dev/null +++ b/roles/openshift_prometheus/vars/default_images.yml @@ -0,0 +1,12 @@ +--- +# image prefix defaults +l_openshift_prometheus_image_prefix: "{{ openshift_prometheus_image_prefix | default('openshift/') }}" +l_openshift_prometheus_proxy_image_prefix: "{{ openshift_prometheus_proxy_image_prefix | default(l_openshift_prometheus_image_prefix) }}" +l_openshift_prometheus_alertmanager_image_prefix: "{{ openshift_prometheus_altermanager_image_prefix | default(l_openshift_prometheus_image_prefix) }}" +l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertbuffer_image_prefix | default(l_openshift_prometheus_image_prefix) }}" + +# image version defaults +l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v2.0.0-dev.3') }}" +l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v1.0.0') }}" +l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.9.1') }}" +l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v0.0.2') }}" diff --git a/roles/openshift_prometheus/vars/openshift-enterprise.yml b/roles/openshift_prometheus/vars/openshift-enterprise.yml new file mode 100644 index 000000000..9bb4c99bb --- /dev/null +++ b/roles/openshift_prometheus/vars/openshift-enterprise.yml @@ -0,0 +1,12 @@ +--- +# image prefix defaults +l_openshift_prometheus_image_prefix: "{{ openshift_prometheus_image_prefix | default('registry.access.redhat.com/openshift3/') }}" 
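These per-deployment-type vars files resolve the image coordinates lazily: a user-set `openshift_prometheus_*_image_*` value always wins, and the proxy, alertmanager, and alertbuffer prefixes fall back to the main prefix when left unset. A small sketch of that `default()` fallback chain; the registry value is illustrative only:

```yaml
# Sketch of the default() fallback chain used by the new l_* image variables.
# The override value is an example; unset variables fall back as shown.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_prometheus_image_prefix: "myregistry.example.com/openshift/"
    l_image_prefix: "{{ openshift_prometheus_image_prefix | default('openshift/') }}"
    l_proxy_image_prefix: "{{ openshift_prometheus_proxy_image_prefix | default(l_image_prefix) }}"
  tasks:
    - name: Show how an unset proxy prefix falls back to the main prefix
      debug:
        msg: "prometheus={{ l_image_prefix }} proxy={{ l_proxy_image_prefix }}"
```

With only the main prefix overridden, the proxy prefix resolves to the same registry, which is the behavior the `l_*` indirection is meant to provide.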
+l_openshift_prometheus_proxy_image_prefix: "{{ openshift_prometheus_proxy_image_prefix | default(l_openshift_prometheus_image_prefix) }}" +l_openshift_prometheus_alertmanager_image_prefix: "{{ openshift_prometheus_altermanager_image_prefix | default(l_openshift_prometheus_image_prefix) }}" +l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertbuffer_image_prefix | default(l_openshift_prometheus_image_prefix) }}" + +# image version defaults +l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v3.7') }}" +l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v3.7') }}" +l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v3.7') }}" +l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v3.7') }}" diff --git a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml index ac21a5e37..1e6aafd00 100644 --- a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml +++ b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml @@ -1,6 +1,8 @@ --- - name: Generate ClusterRoleBindings - template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-clusterrolebinding.yaml + template: + src: clusterrolebinding.j2 + dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-clusterrolebinding.yaml" vars: acct_name: provisioners-{{item}} obj_name: run-provisioners-{{item}} diff --git a/roles/openshift_provisioners/tasks/generate_secrets.yaml b/roles/openshift_provisioners/tasks/generate_secrets.yaml index e6cbb1bbf..fe5ff9f18 100644 --- a/roles/openshift_provisioners/tasks/generate_secrets.yaml +++ b/roles/openshift_provisioners/tasks/generate_secrets.yaml @@ -1,6 +1,8 @@ --- - name: Generate secret for efs - template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-secret.yaml + template: + src: secret.j2 + dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-secret.yaml" vars: name: efs obj_name: "provisioners-efs" diff --git a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml index 4fe0583ee..000f19994 100644 --- a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml +++ b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml @@ -1,6 +1,8 @@ --- - name: Generating serviceaccounts - template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-sa.yaml + template: + src: serviceaccount.j2 + dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-sa.yaml" vars: obj_name: provisioners-{{item}} labels: diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml index 4a6e00513..6e8792446 100644 --- a/roles/openshift_provisioners/tasks/install_efs.yaml +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -9,7 +9,9 @@ changed_when: no - name: Generate efs PersistentVolumeClaim - template: src=pvc.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pvc.yaml + template: + src: pvc.j2 + dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-pvc.yaml" vars: obj_name: "provisioners-efs" size: "1Mi" @@ -21,7 +23,9 @@ changed_when: no - name: Generate efs PersistentVolume - template: src=pv.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pv.yaml + template: + src: pv.j2 + dest: "{{ 
mktemp.stdout }}/templates/{{ obj_name }}-pv.yaml" vars: obj_name: "provisioners-efs" size: "1Mi" diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml index ba472f1c9..d6db81ab9 100644 --- a/roles/openshift_provisioners/tasks/install_support.yaml +++ b/roles/openshift_provisioners/tasks/install_support.yaml @@ -1,16 +1,9 @@ --- -- name: Check for provisioners project already exists - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_provisioners_project}} --no-headers - register: provisioners_project_result - ignore_errors: yes - when: not ansible_check_mode - changed_when: no - -- name: Create provisioners project - command: > - {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_provisioners_project}} - when: not ansible_check_mode and "not found" in provisioners_project_result.stderr +- name: Set provisioners project + oc_project: + state: present + kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" + name: "{{ openshift_provisioners_project }}" - name: Create temp directory for all our templates file: path={{mktemp.stdout}}/templates state=directory mode=0755 diff --git a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2 index f4128f9f0..f81b1617a 100644 --- a/roles/openshift_provisioners/templates/pv.j2 +++ b/roles/openshift_provisioners/templates/pv.j2 @@ -30,3 +30,4 @@ spec: name: {{claim_name}} namespace: {{openshift_provisioners_project}} {% endif %} + storageClassName: "" diff --git a/roles/openshift_provisioners/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2 index 83d503056..0dd8772eb 100644 --- a/roles/openshift_provisioners/templates/pvc.j2 +++ b/roles/openshift_provisioners/templates/pvc.j2 @@ -23,4 +23,5 @@ spec: resources: requests: storage: {{size}} + storageClassName: "" diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index d41245093..95ba9fe4c 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -10,6 +10,11 @@ - name: Ensure libselinux-python is installed package: name=libselinux-python state=present + - name: Remove openshift_additional.repo file + file: + dest: /etc/yum.repos.d/openshift_additional.repo + state: absent + - name: Create any additional repos that are defined yum_repository: description: "{{ item.description | default(item.name | default(item.id)) }}" diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 5dccc9faf..70b236033 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -47,7 +47,7 @@ - name: Abort when openshift_release is invalid when: - openshift_release is defined - - not openshift_release | match('\d+(\.\d+){1,3}$') + - not openshift_release | match('^\d+(\.\d+){1,3}$') fail: msg: |- openshift_release is "{{ openshift_release }}" which is not a valid version string. @@ -69,3 +69,21 @@ - openshift_clusterid is not defined - openshift_cloudprovider_kind is defined - openshift_cloudprovider_kind == 'aws' + +- name: Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive + fail: + msg: > + Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive, + do not set both to true. 
ansible_service_broker_install defaults to true. + when: + - ansible_service_broker_remove | default(false) | bool + - ansible_service_broker_install | default(true) | bool + +- name: Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive + fail: + msg: > + Ensure that template_service_broker_remove and template_service_broker_install are mutually exclusive, + do not set both to true. template_service_broker_remove defaults to true. + when: + - template_service_broker_remove | default(false) | bool + - template_service_broker_install | default(true) | bool diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml index 9d55185c8..cd7bda2c6 100644 --- a/roles/openshift_service_catalog/tasks/generate_certs.yml +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -60,7 +60,7 @@ register: apiserver_ca - shell: > - oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" register: get_apiservices changed_when: no diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index aa3ec5724..3507330e3 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -38,7 +38,7 @@ - name: Make kube-service-catalog project network global command: > - oc adm pod-network make-projects-global kube-service-catalog + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog - include: generate_certs.yml @@ -83,19 +83,19 @@ # only do this if we don't already have the updated role info - name: Generate apply template for clusterrole/edit template: - src: sc_role_patching.j2 + src: sc_admin_edit_role_patching.j2 dest: "{{ mktemp.stdout }}/edit_sc_patch.yml" vars: original_content: "{{ edit_yaml.results.results[0] | to_yaml }}" when: - - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) # only do this if we don't already have the updated role info - name: update edit role for service catalog and pod preset access command: > - oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml when: - - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not 
edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - oc_obj: name: admin @@ -106,19 +106,42 @@ # only do this if we don't already have the updated role info - name: Generate apply template for clusterrole/admin template: - src: sc_role_patching.j2 + src: sc_admin_edit_role_patching.j2 dest: "{{ mktemp.stdout }}/admin_sc_patch.yml" vars: original_content: "{{ admin_yaml.results.results[0] | to_yaml }}" when: - - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) # only do this if we don't already have the updated role info - name: update admin role for service catalog and pod preset access command: > - oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml when: - - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) + +- oc_obj: + name: view + kind: clusterrole + state: list + register: view_yaml + +# only do this if we don't already have the updated role info +- name: Generate apply template for clusterrole/view + template: + src: sc_view_role_patching.j2 + dest: "{{ mktemp.stdout }}/view_sc_patch.yml" + vars: + original_content: "{{ view_yaml.results.results[0] | to_yaml }}" + when: + - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) + +# only do this if we don't already have the updated role info +- name: update view role for service catalog access + command: > + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml + when: + - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) - oc_adm_policy_user: namespace: kube-service-catalog diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml index ca9844e79..a832e1f85 100644 --- 
a/roles/openshift_service_catalog/tasks/remove.yml +++ b/roles/openshift_service_catalog/tasks/remove.yml @@ -1,7 +1,7 @@ --- - name: Remove Service Catalog APIServer command: > - oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog # TODO: this module doesn't currently remove this #- name: Remove service catalog api service @@ -48,7 +48,7 @@ - name: Remove Service Catalog kube-system Role Bindinds shell: > - oc process kube-system-service-catalog-role-bindings -n kube-system | oc delete --ignore-not-found -f - + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template @@ -58,7 +58,7 @@ - name: Remove Service Catalog kube-service-catalog Role Bindinds shell: > - oc process service-catalog-role-bindings -n kube-service-catalog | oc delete --ignore-not-found -f - + {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - - oc_obj: kind: template diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2 index 5d5352c1c..0e5bb7230 100644 --- a/roles/openshift_service_catalog/templates/api_server.j2 +++ b/roles/openshift_service_catalog/templates/api_server.j2 @@ -24,6 +24,7 @@ spec: {% endfor %} containers: - args: + - apiserver - --storage-type - etcd - --secure-port @@ -45,7 +46,7 @@ spec: - --feature-gates - OriginatingIdentity=true image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} - command: ["/usr/bin/apiserver"] + command: ["/usr/bin/service-catalog"] imagePullPolicy: Always name: apiserver ports: diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2 index 2272cbb44..e5e5f6b50 100644 --- a/roles/openshift_service_catalog/templates/controller_manager.j2 +++ b/roles/openshift_service_catalog/templates/controller_manager.j2 @@ -29,6 +29,7 @@ spec: fieldRef: fieldPath: metadata.namespace args: + - controller-manager - -v - "5" - --leader-election-namespace @@ -38,7 +39,7 @@ spec: - --feature-gates - OriginatingIdentity=true image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} - command: ["/usr/bin/controller-manager"] + command: ["/usr/bin/service-catalog"] imagePullPolicy: Always name: controller-manager ports: diff --git a/roles/openshift_service_catalog/templates/sc_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 index 4629d5bb3..59cceafcf 100644 --- a/roles/openshift_service_catalog/templates/sc_role_patching.j2 +++ b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 @@ -12,6 +12,7 @@ - get - list - watch + - patch - apiGroups: - "settings.k8s.io" attributeRestrictions: null diff --git a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 
b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 new file mode 100644 index 000000000..838993854 --- /dev/null +++ b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 @@ -0,0 +1,11 @@ +{{ original_content }} +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - get + - list + - watch diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index d0bc0e028..abe411f67 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -119,13 +119,13 @@ are an exception: Additionally, this role's behavior responds to the following registry-specific variables: -| Name | Default value | Description | -|-----------------------------------------------|------------------------------|-----------------------------------------| -| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes -| openshift_hosted_registry_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage -| openshift_hosted_registry_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only -| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume -| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume +| Name | Default value | Description | +|-------------------------------------------------------|------------------------------|-----------------------------------------| +| openshift_hosted_registry_storage_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes +| openshift_hosted_registry_storage_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage +| openshift_hosted_registry_storage_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only +| openshift_hosted_registry_storage_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume +| openshift_hosted_registry_storage_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume Dependencies ------------ diff --git a/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml new file mode 100644 index 000000000..7b705c2d4 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml @@ -0,0 +1,135 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: deploy-heketi + labels: + glusterfs: heketi-template + deploy-heketi: support + annotations: + description: Bootstrap Heketi installation + tags: glusterfs,heketi,installation +objects: +- kind: Service + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-service + deploy-heketi: support + annotations: + description: Exposes Heketi service + spec: + ports: + - name: deploy-heketi-${CLUSTER_NAME} + port: 8080 + targetPort: 8080 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + 
metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-route + deploy-heketi: support + spec: + to: + kind: Service + name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: deploy-heketi-${CLUSTER_NAME} + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-dc + deploy-heketi: support + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: deploy-heketi + labels: + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod + deploy-heketi: support + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml new file mode 100644 index 000000000..8c5e1ded3 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml @@ -0,0 +1,136 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: glusterfs + labels: + glusterfs: template + annotations: + description: GlusterFS DaemonSet template + tags: glusterfs +objects: +- kind: DaemonSet + apiVersion: extensions/v1beta1 + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: + glusterfs: ${CLUSTER_NAME}-daemonset + annotations: + description: GlusterFS DaemonSet + tags: glusterfs + spec: + selector: + matchLabels: + glusterfs: ${CLUSTER_NAME}-pod + template: + metadata: + name: glusterfs-${CLUSTER_NAME} + labels: 
+ glusterfs: ${CLUSTER_NAME}-pod + glusterfs-node: pod + spec: + nodeSelector: "${{NODE_LABELS}}" + hostNetwork: true + containers: + - name: glusterfs + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: glusterfs-heketi + mountPath: "/var/lib/heketi" + - name: glusterfs-run + mountPath: "/run" + - name: glusterfs-lvm + mountPath: "/run/lvm" + - name: glusterfs-etc + mountPath: "/etc/glusterfs" + - name: glusterfs-logs + mountPath: "/var/log/glusterfs" + - name: glusterfs-config + mountPath: "/var/lib/glusterd" + - name: glusterfs-dev + mountPath: "/dev" + - name: glusterfs-misc + mountPath: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + mountPath: "/sys/fs/cgroup" + readOnly: true + - name: glusterfs-ssl + mountPath: "/etc/ssl" + readOnly: true + securityContext: + capabilities: {} + privileged: true + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 40 + exec: + command: + - "/bin/bash" + - "-c" + - systemctl status glusterd.service + periodSeconds: 25 + successThreshold: 1 + failureThreshold: 15 + resources: {} + terminationMessagePath: "/dev/termination-log" + volumes: + - name: glusterfs-heketi + hostPath: + path: "/var/lib/heketi" + - name: glusterfs-run + emptyDir: {} + - name: glusterfs-lvm + hostPath: + path: "/run/lvm" + - name: glusterfs-etc + hostPath: + path: "/etc/glusterfs" + - name: glusterfs-logs + hostPath: + path: "/var/log/glusterfs" + - name: glusterfs-config + hostPath: + path: "/var/lib/glusterd" + - name: glusterfs-dev + hostPath: + path: "/dev" + - name: glusterfs-misc + hostPath: + path: "/var/lib/misc/glusterfsd" + - name: glusterfs-cgroup + hostPath: + path: "/sys/fs/cgroup" + - name: glusterfs-ssl + hostPath: + path: "/etc/ssl" + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} +parameters: +- name: NODE_LABELS + displayName: Daemonset Node Labels + description: Labels which define the daemonset node selector. 
Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\' + value: '{ "glusterfs": "storage-host" }' +- name: IMAGE_NAME + displayName: GlusterFS container image name + required: True +- name: IMAGE_VERSION + displayName: GlusterFS container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml new file mode 100644 index 000000000..61b6a8c13 --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml @@ -0,0 +1,134 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: heketi + labels: + glusterfs: heketi-template + annotations: + description: Heketi service deployment template + tags: glusterfs,heketi +objects: +- kind: Service + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-service + annotations: + description: Exposes Heketi service + spec: + ports: + - name: heketi + port: 8080 + targetPort: 8080 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod +- kind: Route + apiVersion: v1 + metadata: + name: ${HEKETI_ROUTE} + labels: + glusterfs: heketi-${CLUSTER_NAME}-route + spec: + to: + kind: Service + name: heketi-${CLUSTER_NAME} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-dc + annotations: + description: Defines how to deploy Heketi + spec: + replicas: 1 + selector: + glusterfs: heketi-${CLUSTER_NAME}-pod + triggers: + - type: ConfigChange + strategy: + type: Recreate + template: + metadata: + name: heketi-${CLUSTER_NAME} + labels: + glusterfs: heketi-${CLUSTER_NAME}-pod + spec: + serviceAccountName: heketi-${CLUSTER_NAME}-service-account + containers: + - name: heketi + image: ${IMAGE_NAME}:${IMAGE_VERSION} + imagePullPolicy: IfNotPresent + env: + - name: HEKETI_USER_KEY + value: ${HEKETI_USER_KEY} + - name: HEKETI_ADMIN_KEY + value: ${HEKETI_ADMIN_KEY} + - name: HEKETI_EXECUTOR + value: ${HEKETI_EXECUTOR} + - name: HEKETI_FSTAB + value: /var/lib/heketi/fstab + - name: HEKETI_SNAPSHOT_LIMIT + value: '14' + - name: HEKETI_KUBE_GLUSTER_DAEMONSET + value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} + ports: + - containerPort: 8080 + volumeMounts: + - name: db + mountPath: /var/lib/heketi + - name: config + mountPath: /etc/heketi + readinessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 3 + httpGet: + path: /hello + port: 8080 + livenessProbe: + timeoutSeconds: 3 + initialDelaySeconds: 30 + httpGet: + path: /hello + port: 8080 + volumes: + - name: db + glusterfs: + endpoints: heketi-db-${CLUSTER_NAME}-endpoints + path: heketidbstorage + - name: config + secret: + secretName: heketi-${CLUSTER_NAME}-config-secret +parameters: +- name: HEKETI_USER_KEY + displayName: Heketi User Secret + description: Set secret for those creating volumes as type _user_ +- name: HEKETI_ADMIN_KEY + displayName: Heketi Administrator Secret + description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_EXECUTOR + displayName: heketi executor type + description: Set the executor type, kubernetes or ssh + value: kubernetes +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods 
reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" +- name: IMAGE_NAME + displayName: heketi container image name + required: True +- name: IMAGE_VERSION + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 074904bec..54a6dd7c3 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,6 +1,6 @@ --- - name: Create heketi DB volume - command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json" + command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json" register: setup_storage - name: Copy heketi-storage list diff --git a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml new file mode 100644 index 000000000..030fa81c9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml @@ -0,0 +1,12 @@ +--- +- name: Ensure device mapper modules loaded + template: + src: glusterfs.conf + dest: /etc/modules-load.d/glusterfs.conf + register: km + +- name: load kernel modules + systemd: + name: systemd-modules-load.service + state: restarted + when: km | changed diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf new file mode 100644 index 000000000..dd4d6e6f7 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf @@ -0,0 +1,4 @@ +#{{ ansible_managed }} +dm_thin_pool +dm_snapshot +dm_mirror
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..454e84aaf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1beta1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+  secretNamespace: "{{ glusterfs_namespace }}"
+  secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
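Once the glusterfs-storageclass.yml.j2 template above is rendered (producing a class named glusterfs-<glusterfs_name>), applications can request GlusterFS-backed storage through an ordinary claim. The following is a sketch, assuming glusterfs_name is 'storage' so the class comes out as glusterfs-storage; the claim name and size are examples only:

    # Hypothetical PersistentVolumeClaim using the StorageClass rendered above
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: gluster-claim
    spec:
      storageClassName: glusterfs-storage
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi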
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+    "_port_comment": "Heketi Server Port Number",
+    "port" : "8080",
+
+    "_use_auth": "Enable JWT authorization. Please enable for deployment",
+    "use_auth" : false,
+
+    "_jwt" : "Private keys for access",
+    "jwt" : {
+        "_admin" : "Admin has access to all APIs",
+        "admin" : {
+            "key" : "My Secret"
+        },
+        "_user" : "User only has access to /volumes endpoint",
+        "user" : {
+            "key" : "My Secret"
+        }
+    },
+
+    "_glusterfs_comment": "GlusterFS Configuration",
+    "glusterfs" : {
+
+        "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+        "executor" : "{{ glusterfs_heketi_executor }}",
+
+        "_db_comment": "Database file name",
+        "db" : "/var/lib/heketi/heketi.db",
+
+        "sshexec" : {
+            "keyfile" : "/etc/heketi/private_key",
+            "port" : "{{ glusterfs_heketi_ssh_port }}",
+            "user" : "{{ glusterfs_heketi_ssh_user }}",
+            "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+        }
+    }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
@@ -0,0 +1,49 @@
+{
+  "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%}
+  {%- if cluster in clusters -%}
+    {%- set _dummy = clusters[cluster].append(node) -%}
+  {%- else -%}
+    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+    {
+      "nodes": [
+{%- for node in clusters[cluster] -%}
+        {
+          "node": {
+            "hostnames": {
+              "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+                "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+                "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+                "{{ node }}"
+{%- endif -%}
+              ],
+              "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+                "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+                "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+              ]
+            },
+            "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+          },
+          "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+            "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+          ]
+        }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+      ]
+    }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+  ]
+}
diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
index b3a3d3428..622afb6bd 100644
--- a/roles/template_service_broker/files/openshift-ansible-catalog-console.js
+++ b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
@@ -1 +1 @@
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.template_service_broker = true;
+window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = true;
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 6a532a206..a78e4825b 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -45,7 +45,7 @@
     oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
     --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
    --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
-    | kubectl apply -f -
+    | {{ openshift.common.client_binary }} apply -f -

 # reconcile with rbac
 - name: Reconcile with RBAC file
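Looking back at topology.json.j2 above: it is driven entirely by inventory host variables (glusterfs_cluster, glusterfs_hostname, glusterfs_ip, glusterfs_zone and glusterfs_devices), falling back to the node's OpenShift facts when a variable is not set. As a sketch, a host_vars entry like the following (all values hypothetical) would produce one node with two devices in the generated topology:

    # Hypothetical host_vars for a GlusterFS node; only glusterfs_devices is strictly required
    glusterfs_hostname: node1.example.com
    glusterfs_ip: 192.168.10.11
    glusterfs_zone: 1
    glusterfs_devices:
    - /dev/xvdc
    - /dev/xvdd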
diff --git a/roles/template_service_broker/tasks/main.yml b/roles/template_service_broker/tasks/main.yml
index d7ca970c7..da8aa291b 100644
--- a/roles/template_service_broker/tasks/main.yml
+++ b/roles/template_service_broker/tasks/main.yml
@@ -2,7 +2,7 @@
 # do any asserts here

 - include: install.yml
-  when: template_service_broker_install | default(false) | bool
+  when: template_service_broker_install | default(true) | bool

 - include: remove.yml
   when: template_service_broker_remove | default(false) | bool
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index c3501c018..9ecd63a80 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -220,6 +220,7 @@ class OOConfig(object):
             persisted_value = loaded_config.get(setting)
             if persisted_value is not None:
                 self.settings[setting] = str(persisted_value)
+                installer_log.debug("config: set (%s) to value (%s)", setting, persisted_value)

         # We've loaded any persisted configs, let's verify any
         # paths which are required for a correct and complete
@@ -344,8 +345,9 @@ class OOConfig(object):
         if 'ansible_ssh_user' not in self.settings:
             self.settings['ansible_ssh_user'] = ''

-        self.settings['ansible_inventory_path'] = \
-            '{}/hosts'.format(os.path.dirname(self.config_path))
+        if 'ansible_inventory_path' not in self.settings:
+            self.settings['ansible_inventory_path'] = \
+                '{}/hosts'.format(os.path.dirname(self.config_path))

         # clean up any empty sets
         empty_keys = []
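With the oo_config.py change above, the installer only falls back to '<config dir>/hosts' when no inventory path was persisted, so a location saved on an earlier run is no longer overwritten. A hypothetical fragment of the persisted installer config illustrating a value that would now survive a re-run; the path and user are examples only:

    # Hypothetical persisted installer config fragment
    ansible_ssh_user: root
    ansible_inventory_path: /root/openshift-installer/hosts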